remove hw_endpoint_t rx, rename/change signature of helper functions

This commit is contained in:
hathach
2026-01-07 22:49:18 +07:00
parent a124edf9f6
commit 475e97cc8b
4 changed files with 239 additions and 232 deletions

View File

@ -41,83 +41,49 @@
// The current implementation forces VBUS detection as always present, causing the device to think it is always plugged into a host.
// Therefore it cannot detect a disconnect event, mistaking it for suspend.
// Note: won't work if change to 0 (for now)
#define FORCE_VBUS_DETECT 1
#define FORCE_VBUS_DETECT 1
#define USB_INTS_ERROR_BITS \
(USB_INTS_ERROR_DATA_SEQ_BITS | USB_INTS_ERROR_BIT_STUFF_BITS | USB_INTS_ERROR_CRC_BITS | \
USB_INTS_ERROR_RX_OVERFLOW_BITS | USB_INTS_ERROR_RX_TIMEOUT_BITS)
/*------------------------------------------------------------------*/
/* Low level controller
*------------------------------------------------------------------*/
// Init these in dcd_init
static uint8_t* next_buffer_ptr;
// HW buffer pointer from USB buffer space (max 3840 bytes)
static uint8_t *hw_buffer_ptr;
// USB_MAX_ENDPOINTS Endpoints, direction TUSB_DIR_OUT for out and TUSB_DIR_IN for in.
static struct hw_endpoint hw_endpoints[USB_MAX_ENDPOINTS][2];
// SOF may be used by remote wakeup as RESUME, this indicate whether SOF is actually used by usbd
// SOF may be used by remote wakeup as RESUME, this indicates whether SOF is actually used by usbd
static bool _sof_enable = false;
TU_ATTR_ALWAYS_INLINE static inline struct hw_endpoint *hw_endpoint_get(uint8_t num, tusb_dir_t dir) {
return &hw_endpoints[num][dir];
TU_ATTR_ALWAYS_INLINE static inline hw_endpoint_t *hw_endpoint_get(uint8_t epnum, tusb_dir_t dir) {
return &hw_endpoints[epnum][dir];
}
TU_ATTR_ALWAYS_INLINE static inline struct hw_endpoint* hw_endpoint_get_by_addr(uint8_t ep_addr) {
uint8_t num = tu_edpt_number(ep_addr);
tusb_dir_t dir = tu_edpt_dir(ep_addr);
TU_ATTR_ALWAYS_INLINE static inline hw_endpoint_t *hw_endpoint_get_by_addr(uint8_t ep_addr) {
const uint8_t num = tu_edpt_number(ep_addr);
const tusb_dir_t dir = tu_edpt_dir(ep_addr);
return hw_endpoint_get(num, dir);
}
// Allocate from the USB buffer space (max 3840 bytes)
static void hw_endpoint_alloc(struct hw_endpoint* ep, size_t size) {
// round up size to multiple of 64
size = tu_round_up(ep->wMaxPacketSize, 64);
// double buffered Bulk endpoint
if (ep->transfer_type == TUSB_XFER_BULK) {
size *= 2u;
}
// assign buffer
ep->hw_data_buf = next_buffer_ptr;
next_buffer_ptr += size;
hard_assert(next_buffer_ptr < usb_dpram->epx_data + sizeof(usb_dpram->epx_data));
pico_info(" Allocated %d bytes (0x%p)\r\n", size, ep->hw_data_buf);
}
// Enable endpoint
TU_ATTR_ALWAYS_INLINE static inline void hw_endpoint_enable(struct hw_endpoint* ep) {
uint32_t const reg = EP_CTRL_ENABLE_BITS | ((uint) ep->transfer_type << EP_CTRL_BUFFER_TYPE_LSB) | hw_data_offset(ep->hw_data_buf);
*hwep_ctrl_reg(ep) = reg;
}
// main processing for dcd_edpt_iso_activate
static void hw_endpoint_init(uint8_t ep_addr, uint16_t wMaxPacketSize, uint8_t transfer_type) {
const uint8_t num = tu_edpt_number(ep_addr);
const tusb_dir_t dir = tu_edpt_dir(ep_addr);
hw_endpoint_t* ep = hw_endpoint_get(num, dir);
static void hw_endpoint_init(hw_endpoint_t *ep, uint8_t ep_addr, uint16_t wMaxPacketSize, uint8_t transfer_type) {
const uint8_t epnum = tu_edpt_number(ep_addr);
ep->ep_addr = ep_addr;
// For device, IN is a tx transfer and OUT is an rx transfer
ep->rx = (dir == TUSB_DIR_OUT);
ep->next_pid = 0u;
ep->ep_addr = ep_addr;
ep->next_pid = 0u;
ep->wMaxPacketSize = wMaxPacketSize;
ep->transfer_type = transfer_type;
// Every endpoint has a buffer control register in dpram
// if (dir == TUSB_DIR_IN) {
// ep->buffer_control = &usb_dpram->ep_buf_ctrl[num].in;
// } else {
// ep->buffer_control = &usb_dpram->ep_buf_ctrl[num].out;
// }
ep->transfer_type = transfer_type;
// Clear existing buffer control state
*hwep_buf_ctrl_reg(ep) = 0;
if (num == 0) {
// EP0 has no endpoint control register because the buffer offsets are fixed
// ep->endpoint_control = NULL;
io_rw_32 *buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
*buf_ctrl_reg = 0;
// allocated hw buffer
if (epnum == 0) {
// Buffer offset is fixed (also double buffered)
ep->hw_data_buf = (uint8_t*) &usb_dpram->ep0_buf_a[0];
} else {
@ -127,33 +93,49 @@ static void hw_endpoint_init(uint8_t ep_addr, uint16_t wMaxPacketSize, uint8_t t
// } else {
// ep->endpoint_control = &usb_dpram->ep_ctrl[num - 1].out;
// }
// round up size to multiple of 64
uint16_t size = (uint16_t)tu_round_up(wMaxPacketSize, 64);
// double buffered Bulk endpoint
if (transfer_type == TUSB_XFER_BULK) {
size *= 2u;
}
// assign buffer
ep->hw_data_buf = hw_buffer_ptr;
hw_buffer_ptr += size;
hard_assert(hw_buffer_ptr < usb_dpram->epx_data + sizeof(usb_dpram->epx_data));
pico_info(" Allocated %d bytes (0x%p)\r\n", size, ep->hw_data_buf);
}
}
// Init, allocate buffer and enable endpoint
static void hw_endpoint_open(uint8_t ep_addr, uint16_t wMaxPacketSize, uint8_t transfer_type) {
struct hw_endpoint* ep = hw_endpoint_get_by_addr(ep_addr);
hw_endpoint_init(ep_addr, wMaxPacketSize, transfer_type);
const uint8_t num = tu_edpt_number(ep_addr);
if (num != 0) {
// EP0 is already enabled
hw_endpoint_alloc(ep, ep->wMaxPacketSize);
hw_endpoint_enable(ep);
}
}
const uint8_t epnum = tu_edpt_number(ep_addr);
const tusb_dir_t dir = tu_edpt_dir(ep_addr);
hw_endpoint_t *ep = hw_endpoint_get(epnum, dir);
static void hw_endpoint_xfer(uint8_t ep_addr, uint8_t* buffer, uint16_t total_bytes) {
struct hw_endpoint* ep = hw_endpoint_get_by_addr(ep_addr);
hw_endpoint_xfer_start(ep, buffer, total_bytes);
hw_endpoint_init(ep, ep_addr, wMaxPacketSize, transfer_type);
// Set endpoint control register to enable (EP0 has no endpoint control register)
io_rw_32 *ctrl_reg = hwep_ctrl_reg_device(ep);
if (ctrl_reg != NULL) {
const uint32_t ctrl_value =
EP_CTRL_ENABLE_BITS | ((uint32_t)transfer_type << EP_CTRL_BUFFER_TYPE_LSB) | hw_data_offset(ep->hw_data_buf);
*ctrl_reg = ctrl_value;
}
}
static void hw_endpoint_abort_xfer(struct hw_endpoint* ep) {
// Abort any pending transfer
// Due to Errata RP2040-E2: the ABORT flag is only applicable for B2 and later (unusable for B0, B1),
// which means we are not guaranteed to safely abort a pending transfer on B0 and B1.
const uint8_t dir = tu_edpt_dir(ep->ep_addr);
const uint8_t dir = (uint8_t)tu_edpt_dir(ep->ep_addr);
const uint8_t epnum = tu_edpt_number(ep->ep_addr);
const uint32_t abort_mask = TU_BIT((epnum << 1) | (dir ? 0 : 1));
if (rp2040_chip_version() >= 2) {
usb_hw_set->abort = abort_mask;
while ((usb_hw->abort_done & abort_mask) != abort_mask) {}
@ -173,7 +155,7 @@ static void hw_endpoint_abort_xfer(struct hw_endpoint* ep) {
}
}
static void __tusb_irq_path_func(hw_handle_buff_status)(void) {
static void __tusb_irq_path_func(handle_hw_buff_status)(void) {
uint32_t remaining_buffers = usb_hw->buf_status;
pico_trace("buf_status = 0x%08lx\r\n", remaining_buffers);
uint bit = 1u;
@ -183,12 +165,13 @@ static void __tusb_irq_path_func(hw_handle_buff_status)(void) {
usb_hw_clear->buf_status = bit;
// IN transfer for even i, OUT transfer for odd i
struct hw_endpoint *ep = hw_endpoint_get(i >> 1u, (i & 1u) ? TUSB_DIR_OUT : TUSB_DIR_IN);
const uint8_t epnum = i >> 1u;
const tusb_dir_t dir = (i & 1u) ? TUSB_DIR_OUT : TUSB_DIR_IN;
hw_endpoint_t *ep = hw_endpoint_get(epnum, dir);
// Continue xfer
bool done = hw_endpoint_xfer_continue(ep);
const bool done = hw_endpoint_xfer_continue(ep);
if (done) {
// Notify
// Notify usbd
const uint16_t xferred_len = ep->xferred_len;
hw_endpoint_reset_transfer(ep);
dcd_event_xfer_complete(0, ep->ep_addr, xferred_len, XFER_RESULT_SUCCESS, true);
@ -222,11 +205,11 @@ static void __tusb_irq_path_func(reset_non_control_endpoints)(void) {
tu_memclr(hw_endpoints[1], sizeof(hw_endpoints) - 2 * sizeof(hw_endpoint_t));
// reclaim buffer space
next_buffer_ptr = &usb_dpram->epx_data[0];
hw_buffer_ptr = &usb_dpram->epx_data[0];
}
static void __tusb_irq_path_func(dcd_rp2040_irq)(void) {
uint32_t const status = usb_hw->ints;
const uint32_t status = usb_hw->ints;
uint32_t handled = 0;
if (status & USB_INTF_DEV_SOF_BITS) {
@ -259,7 +242,9 @@ static void __tusb_irq_path_func(dcd_rp2040_irq)(void) {
#endif
// disable SOF interrupt if it is used for RESUME in remote wakeup
if (!keep_sof_alive && !_sof_enable) usb_hw_clear->inte = USB_INTS_DEV_SOF_BITS;
if (!keep_sof_alive && !_sof_enable) {
usb_hw_clear->inte = USB_INTS_DEV_SOF_BITS;
}
dcd_event_sof(0, usb_hw->sof_rd & USB_SOF_RD_BITS, true);
}
@ -268,7 +253,7 @@ static void __tusb_irq_path_func(dcd_rp2040_irq)(void) {
// before closing the EP, the events will be delivered in same order.
if (status & USB_INTS_BUFF_STATUS_BITS) {
handled |= USB_INTS_BUFF_STATUS_BITS;
hw_handle_buff_status();
handle_hw_buff_status();
}
if (status & USB_INTS_SETUP_REQ_BITS) {
@ -345,13 +330,6 @@ static void __tusb_irq_path_func(dcd_rp2040_irq)(void) {
}
}
#define USB_INTS_ERROR_BITS ( \
USB_INTS_ERROR_DATA_SEQ_BITS | \
USB_INTS_ERROR_BIT_STUFF_BITS | \
USB_INTS_ERROR_CRC_BITS | \
USB_INTS_ERROR_RX_OVERFLOW_BITS | \
USB_INTS_ERROR_RX_TIMEOUT_BITS)
/*------------------------------------------------------------------*/
/* Controller API
*------------------------------------------------------------------*/
@ -424,12 +402,11 @@ void dcd_int_disable(__unused uint8_t rhport) {
irq_set_enabled(USBCTRL_IRQ, false);
}
void dcd_set_address(__unused uint8_t rhport, __unused uint8_t dev_addr) {
assert(rhport == 0);
void dcd_set_address(uint8_t rhport, uint8_t dev_addr) {
(void)dev_addr;
// Can't set device address in hardware until status xfer has complete
// Send 0len complete response on EP0 IN
hw_endpoint_xfer(0x80, NULL, 0);
dcd_edpt_xfer(rhport, 0x80, NULL, 0, false);
}
void dcd_remote_wakeup(__unused uint8_t rhport) {
@ -474,7 +451,6 @@ void dcd_sof_enable(uint8_t rhport, bool en) {
/*------------------------------------------------------------------*/
/* DCD Endpoint port
*------------------------------------------------------------------*/
void dcd_edpt0_status_complete(uint8_t rhport, tusb_control_request_t const* request) {
(void) rhport;
@ -496,25 +472,32 @@ bool dcd_edpt_open(uint8_t rhport, tusb_desc_endpoint_t const* desc_edpt) {
// New API: Allocate packet buffer used by ISO endpoints
// Some MCU need manual packet buffer allocation, we allocate the largest size to avoid clustering
bool dcd_edpt_iso_alloc(uint8_t rhport, uint8_t ep_addr, uint16_t largest_packet_size) {
(void) rhport;
struct hw_endpoint* ep = hw_endpoint_get_by_addr(ep_addr);
hw_endpoint_init(ep_addr, largest_packet_size, TUSB_XFER_ISOCHRONOUS);
hw_endpoint_alloc(ep, largest_packet_size);
(void)rhport;
struct hw_endpoint *ep = hw_endpoint_get_by_addr(ep_addr);
hw_endpoint_init(ep, ep_addr, largest_packet_size, TUSB_XFER_ISOCHRONOUS);
return true;
}
// New API: Configure and enable an ISO endpoint according to descriptor
bool dcd_edpt_iso_activate(uint8_t rhport, tusb_desc_endpoint_t const * ep_desc) {
(void) rhport;
struct hw_endpoint* ep = hw_endpoint_get_by_addr(ep_desc->bEndpointAddress);
bool dcd_edpt_iso_activate(uint8_t rhport, const tusb_desc_endpoint_t *ep_desc) {
(void)rhport;
const uint8_t epnum = tu_edpt_number(ep_desc->bEndpointAddress);
const tusb_dir_t dir = tu_edpt_dir(ep_desc->bEndpointAddress);
struct hw_endpoint *ep = hw_endpoint_get(epnum, dir);
TU_ASSERT(ep->hw_data_buf != NULL); // must be inited and allocated previously
if (ep->active) {
hw_endpoint_abort_xfer(ep); // abort any pending transfer
}
ep->wMaxPacketSize = ep_desc->wMaxPacketSize;
hw_endpoint_enable(ep);
// Set control register to enable endpoint
io_rw_32 *ctrl_reg = hwep_ctrl_reg_device(ep);
if (ctrl_reg != NULL) {
const uint32_t ctrl_value = EP_CTRL_ENABLE_BITS | ((uint32_t)TUSB_XFER_ISOCHRONOUS << EP_CTRL_BUFFER_TYPE_LSB) |
hw_data_offset(ep->hw_data_buf);
*ctrl_reg = ctrl_value;
}
return true;
}
@ -525,26 +508,26 @@ void dcd_edpt_close_all(uint8_t rhport) {
reset_non_control_endpoints();
}
bool dcd_edpt_xfer(__unused uint8_t rhport, uint8_t ep_addr, uint8_t* buffer, uint16_t total_bytes, bool is_isr) {
(void) is_isr;
assert(rhport == 0);
hw_endpoint_xfer(ep_addr, buffer, total_bytes);
bool dcd_edpt_xfer(uint8_t rhport, uint8_t ep_addr, uint8_t *buffer, uint16_t total_bytes, bool is_isr) {
(void)rhport;
(void)is_isr;
hw_endpoint_t *ep = hw_endpoint_get_by_addr(ep_addr);
hw_endpoint_xfer_start(ep, buffer, total_bytes);
return true;
}
void dcd_edpt_stall(uint8_t rhport, uint8_t ep_addr) {
(void) rhport;
(void)rhport;
const uint8_t epnum = tu_edpt_number(ep_addr);
const tusb_dir_t dir = tu_edpt_dir(ep_addr);
hw_endpoint_t *ep = hw_endpoint_get(epnum, dir);
if (tu_edpt_number(ep_addr) == 0) {
if (epnum == 0) {
// A stall on EP0 has to be armed so it can be cleared on the next setup packet
usb_hw_set->ep_stall_arm = (tu_edpt_dir(ep_addr) == TUSB_DIR_IN) ? USB_EP_STALL_ARM_EP0_IN_BITS
: USB_EP_STALL_ARM_EP0_OUT_BITS;
usb_hw_set->ep_stall_arm = (dir == TUSB_DIR_IN) ? USB_EP_STALL_ARM_EP0_IN_BITS : USB_EP_STALL_ARM_EP0_OUT_BITS;
}
struct hw_endpoint* ep = hw_endpoint_get_by_addr(ep_addr);
// stall and clear current pending buffer
// may need to use EP_ABORT
// stall and clear current pending buffer, may need to use EP_ABORT
hwep_buf_ctrl_set(ep, USB_BUF_CTRL_STALL);
}

View File

@ -122,7 +122,7 @@ static void __tusb_irq_path_func(hw_handle_buff_status)(void)
remaining_buffers &= ~bit;
struct hw_endpoint * ep = &epx;
uint32_t ep_ctrl = *hwep_ctrl_reg(ep);
uint32_t ep_ctrl = *hwep_ctrl_reg_host(ep);
if ( ep_ctrl & EP_CTRL_DOUBLE_BUFFERED_BITS )
{
TU_LOG(3, "Double Buffered: ");
@ -316,9 +316,6 @@ static void _hw_endpoint_init(struct hw_endpoint *ep, uint8_t dev_addr, uint8_t
ep->ep_addr = ep_addr;
ep->dev_addr = dev_addr;
// For host, IN to host == RX, anything else rx == false
ep->rx = (dir == TUSB_DIR_IN);
// Response to a setup packet on EP0 starts with pid of 1
ep->next_pid = (num == 0 ? 1u : 0u);
ep->wMaxPacketSize = wMaxPacketSize;
@ -339,7 +336,7 @@ static void _hw_endpoint_init(struct hw_endpoint *ep, uint8_t dev_addr, uint8_t
{
ep_reg |= (uint32_t) ((bmInterval - 1) << EP_CTRL_HOST_INTERRUPT_INTERVAL_LSB);
}
*hwep_ctrl_reg(ep) = ep_reg;
*hwep_ctrl_reg_host(ep) = ep_reg;
// pico_trace("endpoint control (0x%p) <- 0x%lx\n", ep->endpoint_control, ep_reg);
ep->configured = true;
@ -465,8 +462,8 @@ void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
// reset epx if it is currently active with unplugged device
if (epx.configured && epx.active && epx.dev_addr == dev_addr) {
epx.configured = false;
*hwep_ctrl_reg(&epx) = 0;
*hwep_buf_ctrl_reg(&epx) = 0;
*hwep_ctrl_reg_host(&epx) = 0;
*hwep_buf_ctrl_reg_host(&epx) = 0;
hw_endpoint_reset_transfer(&epx);
}
@ -481,8 +478,8 @@ void hcd_device_close(uint8_t rhport, uint8_t dev_addr) {
// unconfigure the endpoint
ep->configured = false;
*hwep_ctrl_reg(ep) = 0;
*hwep_buf_ctrl_reg(ep) = 0;
*hwep_ctrl_reg_host(ep) = 0;
*hwep_buf_ctrl_reg_host(ep) = 0;
hw_endpoint_reset_transfer(ep);
}
}

View File

@ -35,20 +35,15 @@
//--------------------------------------------------------------------+
// MACRO CONSTANT TYPEDEF PROTOTYPE
//--------------------------------------------------------------------+
static void _hw_endpoint_xfer_sync(struct hw_endpoint* ep);
static void hwep_xfer_sync(hw_endpoint_t *ep);
#if TUD_OPT_RP2040_USB_DEVICE_UFRAME_FIX
static bool e15_is_bulkin_ep(struct hw_endpoint* ep);
static bool e15_is_critical_frame_period(struct hw_endpoint* ep);
#else
#define e15_is_bulkin_ep(x) (false)
#define e15_is_critical_frame_period(x) (false)
#endif
// if usb hardware is in host mode
TU_ATTR_ALWAYS_INLINE static inline bool is_host_mode(void) {
return (usb_hw->main_ctrl & USB_MAIN_CTRL_HOST_NDEVICE_BITS) ? true : false;
}
#if TUD_OPT_RP2040_USB_DEVICE_UFRAME_FIX
static bool e15_is_bulkin_ep(struct hw_endpoint *ep);
static bool e15_is_critical_frame_period(struct hw_endpoint *ep);
#else
#define e15_is_bulkin_ep(x) (false)
#define e15_is_critical_frame_period(x) (false)
#endif
//--------------------------------------------------------------------+
// Implementation
@ -94,8 +89,18 @@ void __tusb_irq_path_func(hw_endpoint_reset_transfer)(struct hw_endpoint* ep) {
}
void __tusb_irq_path_func(hwep_buf_ctrl_update)(struct hw_endpoint *ep, uint32_t and_mask, uint32_t or_mask) {
const bool is_host = rp2usb_is_host_mode();
uint32_t value = 0;
io_rw_32 *buf_ctrl = hwep_buf_ctrl_reg(ep);
io_rw_32 *buf_ctrl;
#if CFG_TUH_ENABLED
if (is_host) {
buf_ctrl = hwep_buf_ctrl_reg_host(ep);
} else
#endif
{
buf_ctrl = hwep_buf_ctrl_reg_device(ep);
}
if (and_mask) {
value = *buf_ctrl & and_mask;
@ -110,7 +115,7 @@ void __tusb_irq_path_func(hwep_buf_ctrl_update)(struct hw_endpoint *ep, uint32_t
*buf_ctrl = value & ~USB_BUF_CTRL_AVAIL;
// 4.1.2.5.1 Concurrent access: 12 cycles (should be good for 48*12MHz = 576MHz) after write to buffer control
// Don't need delay in host mode as host is in charge
if (!is_host_mode()) {
if (!is_host) {
busy_wait_at_least_cycles(12);
}
}
@ -119,9 +124,9 @@ void __tusb_irq_path_func(hwep_buf_ctrl_update)(struct hw_endpoint *ep, uint32_t
*buf_ctrl = value;
}
// prepare buffer, return buffer control
static uint32_t __tusb_irq_path_func(prepare_ep_buffer)(struct hw_endpoint* ep, uint8_t buf_id) {
uint16_t const buflen = tu_min16(ep->remaining_len, ep->wMaxPacketSize);
// prepare buffer, move data if tx, return buffer control
static uint32_t __tusb_irq_path_func(prepare_ep_buffer)(struct hw_endpoint *ep, uint8_t buf_id, bool is_rx) {
const uint16_t buflen = tu_min16(ep->remaining_len, ep->wMaxPacketSize);
ep->remaining_len = (uint16_t) (ep->remaining_len - buflen);
uint32_t buf_ctrl = buflen | USB_BUF_CTRL_AVAIL;
@ -130,7 +135,7 @@ static uint32_t __tusb_irq_path_func(prepare_ep_buffer)(struct hw_endpoint* ep,
buf_ctrl |= ep->next_pid ? USB_BUF_CTRL_DATA1_PID : USB_BUF_CTRL_DATA0_PID;
ep->next_pid ^= 1u;
if (!ep->rx) {
if (!is_rx) {
// Copy data from user buffer to hw buffer
unaligned_memcpy(ep->hw_data_buf + buf_id * 64, ep->user_buf, buflen);
ep->user_buf += buflen;
@ -146,44 +151,66 @@ static uint32_t __tusb_irq_path_func(prepare_ep_buffer)(struct hw_endpoint* ep,
buf_ctrl |= USB_BUF_CTRL_LAST;
}
if (buf_id) buf_ctrl = buf_ctrl << 16;
if (buf_id) {
buf_ctrl = buf_ctrl << 16;
}
return buf_ctrl;
}
// Prepare buffer control register value
void __tusb_irq_path_func(hw_endpoint_start_next_buffer)(struct hw_endpoint* ep) {
io_rw_32 *ep_ctrl_reg = hwep_ctrl_reg(ep);
uint32_t ep_ctrl = *ep_ctrl_reg;
const tusb_dir_t dir = tu_edpt_dir(ep->ep_addr);
bool is_rx;
// always compute and start with buffer 0
uint32_t buf_ctrl = prepare_ep_buffer(ep, 0) | USB_BUF_CTRL_SEL;
io_rw_32 *ep_ctrl_reg;
// io_rw_32 *buf_ctrl_reg;
// For now: skip double buffered for OUT endpoint in Device mode, since
// host could send < 64 bytes and cause short packet on buffer0
// NOTE: this could happen to Host mode IN endpoint
// Also, Host mode "interrupt" endpoint hardware is only single buffered,
// NOTE2: Currently Host bulk is implemented using "interrupt" endpoint
bool const is_host = is_host_mode();
bool const force_single = (!is_host && !tu_edpt_dir(ep->ep_addr)) ||
(is_host && tu_edpt_number(ep->ep_addr) != 0);
if (ep->remaining_len && !force_single) {
// Use buffer 1 (double buffered) if there is still data
// TODO: Isochronous for buffer1 bit-field is different than CBI (control bulk, interrupt)
buf_ctrl |= prepare_ep_buffer(ep, 1);
// Set endpoint control double buffered bit if needed
ep_ctrl &= ~EP_CTRL_INTERRUPT_PER_BUFFER;
ep_ctrl |= EP_CTRL_DOUBLE_BUFFERED_BITS | EP_CTRL_INTERRUPT_PER_DOUBLE_BUFFER;
} else {
// Single buffered since 1 is enough
ep_ctrl &= ~(EP_CTRL_DOUBLE_BUFFERED_BITS | EP_CTRL_INTERRUPT_PER_DOUBLE_BUFFER);
ep_ctrl |= EP_CTRL_INTERRUPT_PER_BUFFER;
#if CFG_TUH_ENABLED
const bool is_host = rp2usb_is_host_mode();
if (is_host) {
// buf_ctrl_reg = hwep_buf_ctrl_reg_host(ep);
ep_ctrl_reg = hwep_ctrl_reg_host(ep);
is_rx = (dir == TUSB_DIR_IN);
} else
#endif
{
// buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
ep_ctrl_reg = hwep_ctrl_reg_device(ep);
is_rx = (dir == TUSB_DIR_OUT);
}
*ep_ctrl_reg = ep_ctrl;
// always compute and start with buffer 0
uint32_t buf_ctrl = prepare_ep_buffer(ep, 0, is_rx) | USB_BUF_CTRL_SEL;
// EP0 has no endpoint control register, also usbd only schedule 1 packet at a time (single buffer)
if (ep_ctrl_reg != NULL) {
uint32_t ep_ctrl = *ep_ctrl_reg;
// For now: skip double buffered for RX e.g OUT endpoint in Device mode, since host could send < 64 bytes and cause
// short packet on buffer0
// NOTE: this could also happen to a Host mode IN endpoint. Also, Host mode "interrupt" endpoint hardware is only
// single buffered.
// NOTE2: Currently Host bulk is implemented using "interrupt" endpoint
const bool force_single = is_rx;
if (ep->remaining_len && !force_single) {
// Use buffer 1 (double buffered) if there is still data
// TODO: Isochronous for buffer1 bit-field is different than CBI (control bulk, interrupt)
buf_ctrl |= prepare_ep_buffer(ep, 1, is_rx);
// Set endpoint control double buffered bit if needed
ep_ctrl &= ~EP_CTRL_INTERRUPT_PER_BUFFER;
ep_ctrl |= EP_CTRL_DOUBLE_BUFFERED_BITS | EP_CTRL_INTERRUPT_PER_DOUBLE_BUFFER;
} else {
// Single buffered since 1 is enough
ep_ctrl &= ~(EP_CTRL_DOUBLE_BUFFERED_BITS | EP_CTRL_INTERRUPT_PER_DOUBLE_BUFFER);
ep_ctrl |= EP_CTRL_INTERRUPT_PER_BUFFER;
}
*ep_ctrl_reg = ep_ctrl;
}
TU_LOG(3, " Prepare BufCtrl: [0] = 0x%04x [1] = 0x%04x\r\n", tu_u32_low16(buf_ctrl), tu_u32_high16(buf_ctrl));
@ -221,13 +248,16 @@ void hw_endpoint_xfer_start(struct hw_endpoint* ep, uint8_t* buffer, uint16_t to
}
// sync endpoint buffer and return transferred bytes
static uint16_t __tusb_irq_path_func(sync_ep_buffer)(struct hw_endpoint* ep, uint8_t buf_id) {
uint32_t buf_ctrl = hwep_buf_ctrl_get(ep);
if (buf_id) buf_ctrl = buf_ctrl >> 16;
static uint16_t __tusb_irq_path_func(sync_ep_buffer)(hw_endpoint_t *ep, io_rw_32 *buf_ctrl_reg, uint8_t buf_id,
bool is_rx) {
uint32_t buf_ctrl = *buf_ctrl_reg;
if (buf_id) {
buf_ctrl = buf_ctrl >> 16;
}
uint16_t xferred_bytes = buf_ctrl & USB_BUF_CTRL_LEN_MASK;
const uint16_t xferred_bytes = buf_ctrl & USB_BUF_CTRL_LEN_MASK;
if (!ep->rx) {
if (!is_rx) {
// We are continuing a transfer here. If we are TX, we have successfully
// sent some data and can increase the length we have sent
assert(!(buf_ctrl & USB_BUF_CTRL_FULL));
@ -245,7 +275,6 @@ static uint16_t __tusb_irq_path_func(sync_ep_buffer)(struct hw_endpoint* ep, uin
// Short packet
if (xferred_bytes < ep->wMaxPacketSize) {
pico_trace(" Short packet on buffer %d with %u bytes\r\n", buf_id, xferred_bytes);
// Reduce total length as this is last packet
ep->remaining_len = 0;
}
@ -253,21 +282,38 @@ static uint16_t __tusb_irq_path_func(sync_ep_buffer)(struct hw_endpoint* ep, uin
return xferred_bytes;
}
static void __tusb_irq_path_func(_hw_endpoint_xfer_sync)(struct hw_endpoint* ep) {
// Update hw endpoint struct with info from hardware
// after a buff status interrupt
// Update hw endpoint struct with info from hardware after a buff status interrupt
static void __tusb_irq_path_func(hwep_xfer_sync)(hw_endpoint_t *ep) {
// const uint8_t ep_num = tu_edpt_number(ep->ep_addr);
const tusb_dir_t dir = tu_edpt_dir(ep->ep_addr);
uint32_t __unused buf_ctrl = hwep_buf_ctrl_get(ep);
TU_LOG(3, " Sync BufCtrl: [0] = 0x%04x [1] = 0x%04x\r\n", tu_u32_low16(buf_ctrl), tu_u32_high16(buf_ctrl));
io_rw_32 *buf_ctrl_reg;
io_rw_32 *ep_ctrl_reg;
bool is_rx;
// always sync buffer 0
uint16_t buf0_bytes = sync_ep_buffer(ep, 0);
#if CFG_TUH_ENABLED
const bool is_host = rp2usb_is_host_mode();
if (is_host) {
buf_ctrl_reg = hwep_buf_ctrl_reg_host(ep);
ep_ctrl_reg = hwep_ctrl_reg_host(ep);
is_rx = (dir == TUSB_DIR_IN);
} else
#endif
{
buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
ep_ctrl_reg = hwep_ctrl_reg_device(ep);
is_rx = (dir == TUSB_DIR_OUT);
}
TU_LOG(3, " Sync BufCtrl: [0] = 0x%04x [1] = 0x%04x\r\n", tu_u32_low16(*buf_ctrl_reg),
tu_u32_high16(*buf_ctrl_reg));
uint16_t buf0_bytes = sync_ep_buffer(ep, buf_ctrl_reg, 0, is_rx); // always sync buffer 0
// sync buffer 1 if double buffered
if ((*hwep_ctrl_reg(ep)) & EP_CTRL_DOUBLE_BUFFERED_BITS) {
if (ep_ctrl_reg != NULL && (*ep_ctrl_reg) & EP_CTRL_DOUBLE_BUFFERED_BITS) {
if (buf0_bytes == ep->wMaxPacketSize) {
// sync buffer 1 if not short packet
sync_ep_buffer(ep, 1);
sync_ep_buffer(ep, buf_ctrl_reg, 1, is_rx);
} else {
// short packet on buffer 0
// TODO couldn't figure out how to handle this case, which happens with the net_lwip_webserver example
@ -309,7 +355,7 @@ bool __tusb_irq_path_func(hw_endpoint_xfer_continue)(struct hw_endpoint* ep) {
}
// Update EP struct from hardware state
_hw_endpoint_xfer_sync(ep);
hwep_xfer_sync(ep);
// Now we have synced our state with the hardware. Is there more data to transfer?
// If we are done then notify tinyusb
@ -336,6 +382,7 @@ bool __tusb_irq_path_func(hw_endpoint_xfer_continue)(struct hw_endpoint* ep) {
//--------------------------------------------------------------------+
#if TUD_OPT_RP2040_USB_DEVICE_UFRAME_FIX
// E15 is fixed with RP2350
/* Don't mark IN buffers as available during the last 200us of a full-speed
frame. This avoids a situation seen with the USB2.0 hub on a Raspberry
@ -356,9 +403,8 @@ bool __tusb_irq_path_func(hw_endpoint_xfer_continue)(struct hw_endpoint* ep) {
volatile uint32_t e15_last_sof = 0;
// check if Errata 15 is needed for this endpoint i.e device bulk-in
static bool __tusb_irq_path_func(e15_is_bulkin_ep)(struct hw_endpoint* ep) {
return (!is_host_mode() && tu_edpt_dir(ep->ep_addr) == TUSB_DIR_IN &&
ep->transfer_type == TUSB_XFER_BULK);
static bool __tusb_irq_path_func(e15_is_bulkin_ep)(struct hw_endpoint *ep) {
return (!rp2usb_is_host_mode() && tu_edpt_dir(ep->ep_addr) == TUSB_DIR_IN && ep->transfer_type == TUSB_XFER_BULK);
}
// check if we need to apply Errata 15 workaround : i.e

View File

@ -48,10 +48,6 @@ typedef struct hw_endpoint
// Is this a valid struct
bool configured;
// Transfer direction (i.e. IN is rx for host but tx for device)
// allows us to common up transfer functions
bool rx;
uint8_t ep_addr;
uint8_t next_pid;
@ -115,61 +111,46 @@ TU_ATTR_ALWAYS_INLINE static inline void hw_endpoint_lock_update(__unused struct
// sense to have worker and IRQ on same core, however I think using critsec is about equivalent.
}
TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_ctrl_reg(struct hw_endpoint *ep) {
(void)ep;
#if CFG_TUH_ENABLED
if (rp2usb_is_host_mode()) {
if (ep->transfer_type == TUSB_XFER_CONTROL) {
return &usbh_dpram->epx_ctrl;
}
return &usbh_dpram->int_ep_ctrl[ep->interrupt_num].ctrl;
// #if CFG_TUD_ENABLED
TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_ctrl_reg_device(struct hw_endpoint *ep) {
uint8_t const epnum = tu_edpt_number(ep->ep_addr);
const uint8_t dir = (uint8_t)tu_edpt_dir(ep->ep_addr);
if (epnum == 0) {
// EP0 has no endpoint control register because the buffer offsets are fixed and always enabled
return NULL;
}
#endif
#if CFG_TUD_ENABLED
if (!rp2usb_is_host_mode()) {
const uint8_t num = tu_edpt_number(ep->ep_addr);
if (num == 0) {
return NULL;
}
return (tu_edpt_dir(ep->ep_addr) == TUSB_DIR_IN) ? &usb_dpram->ep_ctrl[num - 1].in
: &usb_dpram->ep_ctrl[num - 1].out;
}
#endif
return NULL;
return (dir == TUSB_DIR_IN) ? &usb_dpram->ep_ctrl[epnum - 1].in : &usb_dpram->ep_ctrl[epnum - 1].out;
}
TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_buf_ctrl_reg(struct hw_endpoint *ep) {
(void)ep;
TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_buf_ctrl_reg_device(struct hw_endpoint *ep) {
const uint8_t epnum = tu_edpt_number(ep->ep_addr);
const uint8_t dir = (uint8_t)tu_edpt_dir(ep->ep_addr);
return (dir == TUSB_DIR_IN) ? &usb_dpram->ep_buf_ctrl[epnum].in : &usb_dpram->ep_buf_ctrl[epnum].out;
}
// #endif
#if CFG_TUH_ENABLED
if (rp2usb_is_host_mode()) {
if (ep->transfer_type == TUSB_XFER_CONTROL) {
return &usbh_dpram->epx_buf_ctrl;
}
return &usbh_dpram->int_ep_buffer_ctrl[ep->interrupt_num].ctrl;
TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_ctrl_reg_host(struct hw_endpoint *ep) {
if (ep->transfer_type == TUSB_XFER_CONTROL) {
return &usbh_dpram->epx_ctrl;
}
return &usbh_dpram->int_ep_ctrl[ep->interrupt_num].ctrl;
}
TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_buf_ctrl_reg_host(struct hw_endpoint *ep) {
if (ep->transfer_type == TUSB_XFER_CONTROL) {
return &usbh_dpram->epx_buf_ctrl;
}
return &usbh_dpram->int_ep_buffer_ctrl[ep->interrupt_num].ctrl;
}
#endif
#if CFG_TUD_ENABLED
if (!rp2usb_is_host_mode()) {
const uint8_t num = tu_edpt_number(ep->ep_addr);
return (tu_edpt_dir(ep->ep_addr) == TUSB_DIR_IN) ? &usb_dpram->ep_buf_ctrl[num].in
: &usb_dpram->ep_buf_ctrl[num].out;
}
#endif
return NULL;
}
//--------------------------------------------------------------------+
//
//--------------------------------------------------------------------+
void hwep_buf_ctrl_update(struct hw_endpoint *ep, uint32_t and_mask, uint32_t or_mask);
TU_ATTR_ALWAYS_INLINE static inline uint32_t hwep_buf_ctrl_get(struct hw_endpoint *ep) {
return *hwep_buf_ctrl_reg(ep);
}
TU_ATTR_ALWAYS_INLINE static inline void hwep_buf_ctrl_set(struct hw_endpoint *ep, uint32_t value) {
hwep_buf_ctrl_update(ep, 0, value);
}