Mirror of https://github.com/hathach/tinyusb.git, synced 2026-02-04 13:45:32 +00:00
change signature for hwep_buf_ctrl_* to take pointer to buf control instead of hwep
@@ -87,13 +87,6 @@ static void hw_endpoint_init(hw_endpoint_t *ep, uint8_t ep_addr, uint16_t wMaxPa
     // Buffer offset is fixed (also double buffered)
     ep->hw_data_buf = (uint8_t*) &usb_dpram->ep0_buf_a[0];
   } else {
-    // Set the endpoint control register (starts at EP1, hence num-1)
-    // if (dir == TUSB_DIR_IN) {
-    //   ep->endpoint_control = &usb_dpram->ep_ctrl[num - 1].in;
-    // } else {
-    //   ep->endpoint_control = &usb_dpram->ep_ctrl[num - 1].out;
-    // }
-
     // round up size to multiple of 64
     uint16_t size = (uint16_t)tu_round_up(wMaxPacketSize, 64);

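The rounded size presumably feeds the dpram buffer allocation for the non-EP0 endpoints, so every endpoint buffer lands on a 64-byte slot boundary. A quick illustration of the arithmetic only (not driver code; it assumes tu_round_up(n, 64) rounds n up to the next multiple of 64):

#include <assert.h>
#include <stdint.h>

// Hypothetical stand-in for tu_round_up(n, 64): round n up to the next multiple of 64.
static uint16_t round_up_64(uint16_t n) {
  return (uint16_t)((n + 63u) & ~63u);
}

int main(void) {
  assert(round_up_64(8)  == 64);   // a small interrupt packet still occupies a 64-byte slot
  assert(round_up_64(64) == 64);   // full-speed bulk max packet fits exactly
  assert(round_up_64(65) == 128);  // anything above 64 spills into the next slot
  return 0;
}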
@@ -146,7 +139,8 @@ static void hw_endpoint_abort_xfer(struct hw_endpoint* ep) {
     buf_ctrl |= USB_BUF_CTRL_DATA1_PID;
   }

-  hwep_buf_ctrl_set(ep, buf_ctrl);
+  io_rw_32 *buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
+  hwep_buf_ctrl_set(buf_ctrl_reg, buf_ctrl);
   hw_endpoint_reset_transfer(ep);

   if (rp2040_chip_version() >= 2) {
@@ -528,7 +522,8 @@ void dcd_edpt_stall(uint8_t rhport, uint8_t ep_addr) {
   }

   // stall and clear current pending buffer, may need to use EP_ABORT
-  hwep_buf_ctrl_set(ep, USB_BUF_CTRL_STALL);
+  io_rw_32 *buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
+  hwep_buf_ctrl_set(buf_ctrl_reg, USB_BUF_CTRL_STALL);
 }

 void dcd_edpt_clear_stall(uint8_t rhport, uint8_t ep_addr) {
@@ -539,7 +534,8 @@ void dcd_edpt_clear_stall(uint8_t rhport, uint8_t ep_addr) {

     // clear stall also reset toggle to DATA0, ready for next transfer
     ep->next_pid = 0;
-    hwep_buf_ctrl_clear_mask(ep, USB_BUF_CTRL_STALL);
+    io_rw_32 *buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
+    hwep_buf_ctrl_clear_mask(buf_ctrl_reg, USB_BUF_CTRL_STALL);
   }
 }

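Taken together, these two hunks show the new device-mode call shape: resolve the endpoint's buffer-control register once with hwep_buf_ctrl_reg_device(), then hand the raw pointer to the hwep_buf_ctrl_* helpers. A condensed sketch of that pattern (illustrative only, not the actual driver functions; ep is assumed to be an already-configured hw_endpoint_t):

static void example_stall_then_clear(hw_endpoint_t *ep) {
  io_rw_32 *buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);

  // Stall: the controller answers the next IN/OUT token with a STALL handshake.
  hwep_buf_ctrl_set(buf_ctrl_reg, USB_BUF_CTRL_STALL);

  // ... later, e.g. after the host sends CLEAR_FEATURE(ENDPOINT_HALT) ...

  // Clearing the halt also restarts the data toggle at DATA0 for the next transfer.
  ep->next_pid = 0;
  hwep_buf_ctrl_clear_mask(buf_ctrl_reg, USB_BUF_CTRL_STALL);
}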
@@ -284,8 +284,6 @@ static struct hw_endpoint *_hw_endpoint_allocate(uint8_t transfer_type)
     ep = _next_free_interrupt_ep();
     pico_info("Allocate %s ep %d\n", tu_edpt_type_str(transfer_type), ep->interrupt_num);
     assert(ep);
-    // ep->buffer_control = &usbh_dpram->int_ep_buffer_ctrl[ep->interrupt_num].ctrl;
-    // ep->endpoint_control = &usbh_dpram->int_ep_ctrl[ep->interrupt_num].ctrl;
     // 0 for epx (double buffered): TODO increase to 1024 for ISO
     // 2x64 for intep0
     // 3x64 for intep1
@@ -295,8 +293,6 @@ static struct hw_endpoint *_hw_endpoint_allocate(uint8_t transfer_type)
   else
   {
     ep = &epx;
-    // ep->buffer_control = &usbh_dpram->epx_buf_ctrl;
-    // ep->endpoint_control = &usbh_dpram->epx_ctrl;
     ep->hw_data_buf = &usbh_dpram->epx_data[0];
   }

@@ -306,8 +302,6 @@ static struct hw_endpoint *_hw_endpoint_allocate(uint8_t transfer_type)
 static void _hw_endpoint_init(struct hw_endpoint *ep, uint8_t dev_addr, uint8_t ep_addr, uint16_t wMaxPacketSize, uint8_t transfer_type, uint8_t bmInterval)
 {
   // Already has data buffer, endpoint control, and buffer control allocated at this point
-  // assert(ep->endpoint_control);
-  // assert(ep->buffer_control);
   assert(ep->hw_data_buf);

   uint8_t const num = tu_edpt_number(ep_addr);
@@ -88,32 +88,25 @@ void __tusb_irq_path_func(hw_endpoint_reset_transfer)(struct hw_endpoint* ep) {
   ep->user_buf = 0;
 }

-void __tusb_irq_path_func(hwep_buf_ctrl_update)(struct hw_endpoint *ep, uint32_t and_mask, uint32_t or_mask) {
+void __tusb_irq_path_func(hwep_buf_ctrl_update)(io_rw_32 *buf_ctrl_reg, uint32_t and_mask, uint32_t or_mask) {
   const bool is_host = rp2usb_is_host_mode();
   uint32_t value = 0;
-  io_rw_32 *buf_ctrl;
-
-#if CFG_TUH_ENABLED
-  if (is_host) {
-    buf_ctrl = hwep_buf_ctrl_reg_host(ep);
-  } else
-#endif
-  {
-    buf_ctrl = hwep_buf_ctrl_reg_device(ep);
-  }
+  uint32_t buf_ctrl = *buf_ctrl_reg;

   if (and_mask) {
-    value = *buf_ctrl & and_mask;
+    value = buf_ctrl & and_mask;
   }

   if (or_mask) {
     value |= or_mask;
     if (or_mask & USB_BUF_CTRL_AVAIL) {
-      if (*buf_ctrl & USB_BUF_CTRL_AVAIL) {
-        panic("ep %02X was already available", ep->ep_addr);
+      if (buf_ctrl & USB_BUF_CTRL_AVAIL) {
+        panic("buf_ctrl @%lX already available", (uintptr_t)buf_ctrl_reg);
       }
-      *buf_ctrl = value & ~USB_BUF_CTRL_AVAIL;
-      // 4.1.2.5.1 Con-current access: 12 cycles (should be good for 48*12Mhz = 576Mhz) after write to buffer control
+      *buf_ctrl_reg = value & ~USB_BUF_CTRL_AVAIL;
+
+      // Section 4.1.2.7.1 (rp2040) / 12.7.3.7.1 (rp2350) Concurrent access: after write to buffer control, we need to
+      // wait at least 1/48 mhz (usb clock), 12 cycles should be good for 48*12Mhz = 576Mhz.
       // Don't need delay in host mode as host is in charge
       if (!is_host) {
         busy_wait_at_least_cycles(12);
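The subtle part of this function is the AVAIL handling: once USB_BUF_CTRL_AVAIL is set, the controller may consume the buffer-control word at any moment, so every other field has to be committed first and given at least one 48 MHz USB clock to land (the datasheet sections cited in the new comment). A stripped-down sketch of just that write pattern, assuming buf_ctrl_reg points at a dpram buffer-control word and value already includes USB_BUF_CTRL_AVAIL:

static void commit_buf_ctrl(io_rw_32 *buf_ctrl_reg, uint32_t value) {
  // Commit everything except AVAIL so the controller never sees a half-written word.
  *buf_ctrl_reg = value & ~USB_BUF_CTRL_AVAIL;

  // Wait at least 1/48 MHz; 12 CPU cycles covers core clocks up to 48 MHz * 12 = 576 MHz.
  busy_wait_at_least_cycles(12);

  // Now hand the buffer to the controller.
  *buf_ctrl_reg = value;
}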
@@ -121,7 +114,7 @@ void __tusb_irq_path_func(hwep_buf_ctrl_update)(struct hw_endpoint *ep, uint32_t
     }
   }

-  *buf_ctrl = value;
+  *buf_ctrl_reg = value;
 }

 // prepare buffer, move data if tx, return buffer control
@@ -161,21 +154,21 @@ static uint32_t __tusb_irq_path_func(prepare_ep_buffer)(struct hw_endpoint *ep,
 // Prepare buffer control register value
 void __tusb_irq_path_func(hw_endpoint_start_next_buffer)(struct hw_endpoint* ep) {
   const tusb_dir_t dir = tu_edpt_dir(ep->ep_addr);
-  bool is_rx;

+  bool is_rx;
   io_rw_32 *ep_ctrl_reg;
-  // io_rw_32 *buf_ctrl_reg;
+  io_rw_32 *buf_ctrl_reg;

 #if CFG_TUH_ENABLED
   const bool is_host = rp2usb_is_host_mode();
   if (is_host) {
-    // buf_ctrl_reg = hwep_buf_ctrl_reg_host(ep);
+    buf_ctrl_reg = hwep_buf_ctrl_reg_host(ep);
     ep_ctrl_reg = hwep_ctrl_reg_host(ep);
     is_rx = (dir == TUSB_DIR_IN);
   } else
 #endif
   {
-    // buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
+    buf_ctrl_reg = hwep_buf_ctrl_reg_device(ep);
     ep_ctrl_reg = hwep_ctrl_reg_device(ep);
     is_rx = (dir == TUSB_DIR_OUT);
   }
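The is_rx assignment here is easy to misread: "rx" means the RP2040 controller receives data into dpram, and that flips with the role. In device mode the controller receives on OUT endpoints (data arrives from the host); in host mode it receives on IN endpoints (data arrives from the device). The same mapping as a tiny standalone helper (illustrative only; the enum is a local stand-in for TinyUSB's tusb_dir_t):

#include <stdbool.h>

typedef enum { TUSB_DIR_OUT = 0, TUSB_DIR_IN = 1 } tusb_dir_t; // local stand-in, values match TinyUSB

static bool controller_receives(bool is_host, tusb_dir_t dir) {
  return is_host ? (dir == TUSB_DIR_IN) : (dir == TUSB_DIR_OUT);
}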
@@ -216,7 +209,7 @@ void __tusb_irq_path_func(hw_endpoint_start_next_buffer)(struct hw_endpoint* ep)

   // Finally, write to buffer_control which will trigger the transfer
   // the next time the controller polls this dpram address
-  hwep_buf_ctrl_set(ep, buf_ctrl);
+  hwep_buf_ctrl_set(buf_ctrl_reg, buf_ctrl);
 }

 void hw_endpoint_xfer_start(struct hw_endpoint* ep, uint8_t* buffer, uint16_t total_len) {
@@ -334,7 +327,8 @@ static void __tusb_irq_path_func(hwep_xfer_sync)(hw_endpoint_t *ep) {
     ep_ctrl &= ~(EP_CTRL_DOUBLE_BUFFERED_BITS | EP_CTRL_INTERRUPT_PER_DOUBLE_BUFFER);
     ep_ctrl |= EP_CTRL_INTERRUPT_PER_BUFFER;

-    hwep_buf_ctrl_set(ep, 0);
+    io_rw_32 *buf_ctrl_reg = is_host ? hwep_buf_ctrl_reg_host(ep) : hwep_buf_ctrl_reg_device(ep);
+    hwep_buf_ctrl_set(buf_ctrl_reg, 0);

     usb_hw->abort &= ~TU_BIT(ep_id);

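For reference, the endpoint-control rewrite in this hunk (drop the double-buffer flags, interrupt per single buffer) packaged as a small helper; ep_ctrl_reg is assumed to point at the endpoint's dpram control word, e.g. the pointer returned by hwep_ctrl_reg_device() or hwep_ctrl_reg_host():

static void force_single_buffered(io_rw_32 *ep_ctrl_reg) {
  uint32_t ep_ctrl = *ep_ctrl_reg;
  ep_ctrl &= ~(EP_CTRL_DOUBLE_BUFFERED_BITS | EP_CTRL_INTERRUPT_PER_DOUBLE_BUFFER);
  ep_ctrl |= EP_CTRL_INTERRUPT_PER_BUFFER;  // one interrupt per completed (single) buffer
  *ep_ctrl_reg = ep_ctrl;
}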
@@ -45,41 +45,27 @@
 // Hardware information per endpoint
 typedef struct hw_endpoint
 {
-  // Is this a valid struct
-  bool configured;
-
   uint8_t ep_addr;
   uint8_t next_pid;
+  // Interrupt, bulk, etc
+  uint8_t transfer_type;

-  // Endpoint control register
-  // io_rw_32 *endpoint_control;
+  bool active; // Endpoint is in use
+  uint8_t pending; // Transfer scheduled but not active

-  // Buffer control register
-  // io_rw_32 *buffer_control;
+  uint16_t wMaxPacketSize;

-  // Buffer pointer in usb dpram
-  uint8_t *hw_data_buf;
-
-  // User buffer in main memory
-  uint8_t *user_buf;
+  uint8_t *hw_data_buf; // Buffer pointer in usb dpram
+  uint8_t *user_buf; // User buffer in main memory

   // Current transfer information
   uint16_t remaining_len;
   uint16_t xferred_len;

-  // Data needed from EP descriptor
-  uint16_t wMaxPacketSize;
-
-  // Endpoint is in use
-  bool active;
-
-  // Interrupt, bulk, etc
-  uint8_t transfer_type;
-
-  // Transfer scheduled but not active
-  uint8_t pending;
-
 #if CFG_TUH_ENABLED
+  // Is this a valid struct
+  bool configured;
+
   // Only needed for host
   uint8_t dev_addr;

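The reshuffle above mostly groups members by size, moves the comments inline next to the fields they describe, and tucks the host-only configured flag in with dev_addr behind CFG_TUH_ENABLED. As a side note, grouping same-sized members this way is also what keeps alignment padding down; an illustration with made-up structs (not the driver's actual layout):

#include <stdint.h>
#include <stdio.h>

struct scattered { uint8_t a; uint8_t *p; uint8_t b; uint16_t w; uint8_t c; };
struct grouped   { uint8_t *p; uint16_t w; uint8_t a; uint8_t b; uint8_t c; };

int main(void) {
  // On a typical 32-bit ABI (4-byte pointers) this prints scattered=16 grouped=12;
  // exact numbers depend on the target ABI.
  printf("scattered=%zu grouped=%zu\n", sizeof(struct scattered), sizeof(struct grouped));
  return 0;
}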
@@ -131,7 +117,7 @@ TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_buf_ctrl_reg_device(struct hw

 #if CFG_TUH_ENABLED
 TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_ctrl_reg_host(struct hw_endpoint *ep) {
-  if (ep->transfer_type == TUSB_XFER_CONTROL) {
+  if (tu_edpt_number(ep->ep_addr) == 0) {
     return &usbh_dpram->epx_ctrl;
   }
   return &usbh_dpram->int_ep_ctrl[ep->interrupt_num].ctrl;
@@ -149,18 +135,18 @@ TU_ATTR_ALWAYS_INLINE static inline io_rw_32 *hwep_buf_ctrl_reg_host(struct hw_e
 //--------------------------------------------------------------------+
 //
 //--------------------------------------------------------------------+
-void hwep_buf_ctrl_update(struct hw_endpoint *ep, uint32_t and_mask, uint32_t or_mask);
+void hwep_buf_ctrl_update(io_rw_32 *buf_ctrl_reg, uint32_t and_mask, uint32_t or_mask);

-TU_ATTR_ALWAYS_INLINE static inline void hwep_buf_ctrl_set(struct hw_endpoint *ep, uint32_t value) {
-  hwep_buf_ctrl_update(ep, 0, value);
+TU_ATTR_ALWAYS_INLINE static inline void hwep_buf_ctrl_set(io_rw_32 *buf_ctrl_reg, uint32_t value) {
+  hwep_buf_ctrl_update(buf_ctrl_reg, 0, value);
 }

-TU_ATTR_ALWAYS_INLINE static inline void hwep_buf_ctrl_set_mask32(struct hw_endpoint *ep, uint32_t value) {
-  hwep_buf_ctrl_update(ep, ~value, value);
+TU_ATTR_ALWAYS_INLINE static inline void hwep_buf_ctrl_set_mask(io_rw_32 *buf_ctrl_reg, uint32_t value) {
+  hwep_buf_ctrl_update(buf_ctrl_reg, ~value, value);
 }

-TU_ATTR_ALWAYS_INLINE static inline void hwep_buf_ctrl_clear_mask(struct hw_endpoint *ep, uint32_t value) {
-  hwep_buf_ctrl_update(ep, ~value, 0);
+TU_ATTR_ALWAYS_INLINE static inline void hwep_buf_ctrl_clear_mask(io_rw_32 *buf_ctrl_reg, uint32_t value) {
+  hwep_buf_ctrl_update(buf_ctrl_reg, ~value, 0);
 }

 static inline uintptr_t hw_data_offset(uint8_t *buf) {
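The three wrappers differ only in which masks they forward, which is easy to lose track of: hwep_buf_ctrl_set() replaces the whole word, hwep_buf_ctrl_set_mask() sets bits while keeping the rest, and hwep_buf_ctrl_clear_mask() clears bits while keeping the rest. A pure-logic sketch of hwep_buf_ctrl_update()'s mask handling (the hardware access and the AVAIL two-step write are left out), with the three wrapper cases spelled out:

#include <assert.h>
#include <stdint.h>

static uint32_t next_buf_ctrl(uint32_t current, uint32_t and_mask, uint32_t or_mask) {
  uint32_t value = 0;
  if (and_mask) { value = current & and_mask; }  // keep only the masked-in current bits
  if (or_mask)  { value |= or_mask; }            // then OR the requested bits on top
  return value;
}

int main(void) {
  const uint32_t cur = 0x40Fu;  // some current buffer-control contents
  // set(v):        and_mask = 0,  or_mask = v  -> whole word replaced
  assert(next_buf_ctrl(cur, 0, 0x80u) == 0x80u);
  // set_mask(v):   and_mask = ~v, or_mask = v  -> v's bits set, everything else kept
  assert(next_buf_ctrl(cur, ~0x80u, 0x80u) == 0x48Fu);
  // clear_mask(v): and_mask = ~v, or_mask = 0  -> v's bits cleared, everything else kept
  assert(next_buf_ctrl(cur, ~0x400u, 0) == 0x00Fu);
  return 0;
}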