This patch adds stream transfer support to the cdns3 gadget driver.
The changes have been validated on an FPGA platform.

Summary of changes:
- Enable stream-related interrupts only for stream-capable endpoints.
- Process the PRIME and IOT interrupts for stream-capable endpoints.
  On a PRIME interrupt the prime_flag is set and the transfer is armed;
  otherwise the request is only added to the deferred request queue.
- For stream-capable endpoints, prepare the TD with the correct
  stream ID.
- Update the TDL calculation based on the controller version (see the
  reference sketch after the diff):
  1. Controller version == DEV_VER_V2: the USB_CONF2_EN_TDL_TRB bit is
     enabled in the usb_conf2 register in the DMULT configuration. This
     enables TDL calculation based on the TRB, so the TDL is set in the
     TRB itself.
  2. Controller version < DEV_VER_V2: write TDL and STDL to the ep_cmd
     register.
  3. Controller version > DEV_VER_V2: write TDL to the ep_tdl register.
- Write ERDY with the correct stream ID to the ep_cmd register.
- Add stream ID information to the trace logs.

Signed-off-by: Jayshri Pawar <jpawar@xxxxxxxxxxx>
Signed-off-by: Pawel Laszczak <pawell@xxxxxxxxxxx>
Signed-off-by: Rahul Kumar <kurahul@xxxxxxxxxxx>
---
Version History:
 v2: Patch generated on top of linux-next.git master branch

 drivers/usb/cdns3/gadget.c | 533 +++++++++++++++++++++++++++++++++----
 drivers/usb/cdns3/gadget.h | 26 +-
 drivers/usb/cdns3/trace.h | 93 ++++++-
 3 files changed, 596 insertions(+), 56 deletions(-)

diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index e4820bd4b579..736b0c6e27fe 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -71,6 +71,23 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags); +static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, + struct usb_request *request); + +static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, + struct usb_request *request); + +/** + * cdns3_clear_register_bit - clear bit in given register. + * @ptr: address of device controller register to be read and changed + * @mask: bits requested to clear + */ +void cdns3_clear_register_bit(void __iomem *ptr, u32 mask) +{ + mask = readl(ptr) & ~mask; + writel(mask, ptr); +} + /** * cdns3_set_register_bit - set bit in given register. * @ptr: address of device controller register to be read and changed @@ -150,6 +167,21 @@ void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep) writel(ep, &priv_dev->regs->ep_sel); } +/** + * cdns3_get_tdl - gets current tdl for selected endpoint. + * @priv_dev: extended gadget object + * + * Before calling this function the appropriate endpoint must + * be selected by means of cdns3_select_ep function.
+ */ +static int cdns3_get_tdl(struct cdns3_device *priv_dev) +{ + if (priv_dev->dev_ver < DEV_VER_V3) + return EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd)); + else + return readl(&priv_dev->regs->ep_tdl); +} + dma_addr_t cdns3_trb_virt_to_dma(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb) { @@ -166,7 +198,22 @@ int cdns3_ring_size(struct cdns3_endpoint *priv_ep) case USB_ENDPOINT_XFER_CONTROL: return TRB_CTRL_RING_SIZE; default: - return TRB_RING_SIZE; + if (priv_ep->use_streams) + return TRB_STREAM_RING_SIZE; + else + return TRB_RING_SIZE; + } +} + +static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + if (priv_ep->trb_pool) { + dma_free_coherent(priv_dev->sysdev, + cdns3_ring_size(priv_ep), + priv_ep->trb_pool, priv_ep->trb_pool_dma); + priv_ep->trb_pool = NULL; } } @@ -180,8 +227,12 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) { struct cdns3_device *priv_dev = priv_ep->cdns3_dev; int ring_size = cdns3_ring_size(priv_ep); + int num_trbs = ring_size / TRB_SIZE; struct cdns3_trb *link_trb; + if (priv_ep->trb_pool && priv_ep->alloc_ring_size < ring_size) + cdns3_free_trb_pool(priv_ep); + if (!priv_ep->trb_pool) { priv_ep->trb_pool = dma_alloc_coherent(priv_dev->sysdev, ring_size, @@ -189,32 +240,30 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) GFP_DMA32 | GFP_ATOMIC); if (!priv_ep->trb_pool) return -ENOMEM; - } else { + + priv_ep->alloc_ring_size = ring_size; memset(priv_ep->trb_pool, 0, ring_size); } + priv_ep->num_trbs = num_trbs; + if (!priv_ep->num) return 0; - priv_ep->num_trbs = ring_size / TRB_SIZE; - /* Initialize the last TRB as Link TRB. */ + /* Initialize the last TRB as Link TRB */ link_trb = (priv_ep->trb_pool + (priv_ep->num_trbs - 1)); - link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma); - link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE; - return 0; -} - -static void cdns3_free_trb_pool(struct cdns3_endpoint *priv_ep) -{ - struct cdns3_device *priv_dev = priv_ep->cdns3_dev; - - if (priv_ep->trb_pool) { - dma_free_coherent(priv_dev->sysdev, - cdns3_ring_size(priv_ep), - priv_ep->trb_pool, priv_ep->trb_pool_dma); - priv_ep->trb_pool = NULL; + if (priv_ep->use_streams) { + /* + * For stream capable endpoints driver use single correct TRB. 
+ * The last trb has zeroed cycle bit + */ + link_trb->control = 0; + } else { + link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma); + link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE; } + return 0; } /** @@ -253,6 +302,7 @@ void cdns3_hw_reset_eps_config(struct cdns3_device *priv_dev) priv_dev->onchip_used_size = 0; priv_dev->out_mem_is_allocated = 0; priv_dev->wait_for_setup = 0; + priv_dev->using_streams = 0; } /** @@ -356,17 +406,43 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev, { struct usb_request *request; int ret = 0; + u8 pending_empty = list_empty(&priv_ep->pending_req_list); + + /* + * If the last pending transfer is INTERNAL + * OR streams are enabled for this endpoint + * do NOT start new transfer till the last one is pending + */ + if (!pending_empty) { + struct cdns3_request *priv_req; + + request = cdns3_next_request(&priv_ep->pending_req_list); + priv_req = to_cdns3_request(request); + if ((priv_req->flags & REQUEST_INTERNAL) || + (priv_ep->flags & EP_TDLCHK_EN) || + priv_ep->use_streams) { + trace_printk("Blocking external request\n"); + return ret; + } + } while (!list_empty(&priv_ep->deferred_req_list)) { request = cdns3_next_request(&priv_ep->deferred_req_list); - ret = cdns3_ep_run_transfer(priv_ep, request); + if (!priv_ep->use_streams) { + ret = cdns3_ep_run_transfer(priv_ep, request); + } else { + priv_ep->stream_sg_idx = 0; + ret = cdns3_ep_run_stream_transfer(priv_ep, request); + } if (ret) return ret; list_del(&request->list); list_add_tail(&request->list, &priv_ep->pending_req_list); + if (request->stream_id != 0 || (priv_ep->flags & EP_TDLCHK_EN)) + break; } priv_ep->flags &= ~EP_RING_FULL; @@ -379,7 +455,7 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev, * buffer for unblocking on-chip FIFO buffer. This flag will be cleared * if before first DESCMISS interrupt the DMA will be armed. 
*/ -#define cdns3_wa2_enable_detection(priv_dev, ep_priv, reg) do { \ +#define cdns3_wa2_enable_detection(priv_dev, priv_ep, reg) do { \ if (!priv_ep->dir && priv_ep->type != USB_ENDPOINT_XFER_ISOC) { \ priv_ep->flags |= EP_QUIRK_EXTRA_BUF_DET; \ (reg) |= EP_STS_EN_DESCMISEN; \ @@ -450,10 +526,17 @@ struct usb_request *cdns3_wa2_gadget_giveback(struct cdns3_device *priv_dev, if (!req) return NULL; + /* unmap the gadget request before copying data */ + usb_gadget_unmap_request_by_dev(priv_dev->sysdev, req, + priv_ep->dir); + cdns3_wa2_descmiss_copy_data(priv_ep, req); if (!(priv_ep->flags & EP_QUIRK_END_TRANSFER) && req->length != req->actual) { /* wait for next part of transfer */ + /* re-map the gadget request buffer*/ + usb_gadget_map_request_by_dev(priv_dev->sysdev, req, + usb_endpoint_dir_in(priv_ep->endpoint.desc)); return NULL; } @@ -570,6 +653,13 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep) { struct cdns3_request *priv_req; struct usb_request *request; + u8 pending_empty = list_empty(&priv_ep->pending_req_list); + + /* check for pending transfer */ + if (!pending_empty) { + trace_cdns3_wa2(priv_ep, "Ignoring Descriptor missing IRQ\n"); + return; + } if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET) { priv_ep->flags &= ~EP_QUIRK_EXTRA_BUF_DET; @@ -578,8 +668,10 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep) trace_cdns3_wa2(priv_ep, "Description Missing detected\n"); - if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) + if (priv_ep->wa2_counter >= CDNS3_WA2_NUM_BUFFERS) { + trace_cdns3_wa2(priv_ep, "WA2 overflow\n"); cdns3_wa2_remove_old_request(priv_ep); + } request = cdns3_gadget_ep_alloc_request(&priv_ep->endpoint, GFP_ATOMIC); @@ -621,6 +713,78 @@ static void cdns3_wa2_descmissing_packet(struct cdns3_endpoint *priv_ep) "Failed: No sufficient memory for DESCMIS\n"); } +static void cdns3_wa2_reset_tdl(struct cdns3_device *priv_dev) +{ + u16 tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd)); + + if (tdl) { + u16 reset_val = EP_CMD_TDL_MAX + 1 - tdl; + + writel(EP_CMD_TDL_SET(reset_val) | EP_CMD_STDL, + &priv_dev->regs->ep_cmd); + } +} + +static void cdns3_wa2_check_outq_status(struct cdns3_device *priv_dev) +{ + u32 ep_sts_reg; + + /* select EP0-out */ + cdns3_select_ep(priv_dev, 0); + + ep_sts_reg = readl(&priv_dev->regs->ep_sts); + + if (EP_STS_OUTQ_VAL(ep_sts_reg)) { + u32 outq_ep_num = EP_STS_OUTQ_NO(ep_sts_reg); + struct cdns3_endpoint *outq_ep = priv_dev->eps[outq_ep_num]; + + if ((outq_ep->flags & EP_ENABLED) && !(outq_ep->use_streams) && + outq_ep->type != USB_ENDPOINT_XFER_ISOC && outq_ep_num) { + u8 pending_empty = list_empty(&outq_ep->pending_req_list); + + if ((outq_ep->flags & EP_QUIRK_EXTRA_BUF_DET) || + (outq_ep->flags & EP_QUIRK_EXTRA_BUF_EN) || + !pending_empty) { + } else { + u32 ep_sts_en_reg; + u32 ep_cmd_reg; + + cdns3_select_ep(priv_dev, outq_ep->num | + outq_ep->dir); + ep_sts_en_reg = readl(&priv_dev->regs->ep_sts_en); + ep_cmd_reg = readl(&priv_dev->regs->ep_cmd); + + outq_ep->flags |= EP_TDLCHK_EN; + cdns3_set_register_bit(&priv_dev->regs->ep_cfg, + EP_CFG_TDL_CHK); + + cdns3_wa2_enable_detection(priv_dev, outq_ep, + ep_sts_en_reg); + writel(ep_sts_en_reg, + &priv_dev->regs->ep_sts_en); + /* reset tdl value to zero */ + cdns3_wa2_reset_tdl(priv_dev); + /* + * Memory barrier - Reset tdl before ringing the + * doorbell. 
+ */ + wmb(); + if (EP_CMD_DRDY & ep_cmd_reg) { + trace_cdns3_wa2(outq_ep, "Enabling WA2 skipping doorbell\n"); + + } else { + trace_cdns3_wa2(outq_ep, "Enabling WA2 ringing doorbell\n"); + /* + * ring doorbell to generate DESCMIS irq + */ + writel(EP_CMD_DRDY, + &priv_dev->regs->ep_cmd); + } + } + } + } +} + /** * cdns3_gadget_giveback - call struct usb_request's ->complete callback * @priv_ep: The endpoint to whom the request belongs to @@ -807,14 +971,120 @@ static void cdns3_wa1_tray_restore_cycle_bit(struct cdns3_device *priv_dev, cdns3_wa1_restore_cycle_bit(priv_ep); } +static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, + struct usb_request *request) +{ + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + struct cdns3_request *priv_req; + struct cdns3_trb *trb; + dma_addr_t trb_dma; + int address; + u32 control; + u32 length; + u32 tdl; + unsigned int sg_idx = priv_ep->stream_sg_idx; + + priv_req = to_cdns3_request(request); + address = priv_ep->endpoint.desc->bEndpointAddress; + + priv_ep->flags |= EP_PENDING_REQUEST; + + /* must allocate buffer aligned to 8 */ + if (priv_req->flags & REQUEST_UNALIGNED) + trb_dma = priv_req->aligned_buf->dma; + else + trb_dma = request->dma; + + /* For stream capable endpoints driver use only single TD. */ + trb = priv_ep->trb_pool + priv_ep->enqueue; + priv_req->start_trb = priv_ep->enqueue; + priv_req->end_trb = priv_req->start_trb; + priv_req->trb = trb; + + cdns3_select_ep(priv_ep->cdns3_dev, address); + + control = TRB_TYPE(TRB_NORMAL) | TRB_CYCLE | + TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP; + + if (!request->num_sgs) { + trb->buffer = TRB_BUFFER(trb_dma); + length = request->length; + } else { + trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address); + length = request->sg[sg_idx].length; + } + + tdl = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); + + trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) | + TRB_LEN(length); + + /* + * For DEV_VER_V2 controller version we have enabled + * USB_CONF2_EN_TDL_TRB in DMULT configuration. + * This enables TDL calculation based on TRB, hence setting TDL in TRB. + */ + if (priv_dev->dev_ver >= DEV_VER_V2) { + if (priv_dev->gadget.speed == USB_SPEED_SUPER) + trb->length |= TRB_TDL_SS_SIZE(tdl); + } + priv_req->flags |= REQUEST_PENDING; + + trb->control = control; + + trace_cdns3_prepare_trb(priv_ep, priv_req->trb); + + /* + * Memory barrier - Cycle Bit must be set before trb->length and + * trb->buffer fields. 
+ */ + wmb(); + + /* always first element */ + writel(EP_TRADDR_TRADDR(priv_ep->trb_pool_dma), + &priv_dev->regs->ep_traddr); + + if (!(priv_ep->flags & EP_STALLED)) { + trace_cdns3_ring(priv_ep); + /*clearing TRBERR and EP_STS_DESCMIS before seting DRDY*/ + writel(EP_STS_TRBERR | EP_STS_DESCMIS, &priv_dev->regs->ep_sts); + + priv_ep->prime_flag = false; + + /* + * Controller version DEV_VER_V2 tdl calculation + * is based on TRB + */ + + if (priv_dev->dev_ver < DEV_VER_V2) + writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, + &priv_dev->regs->ep_cmd); + else if (priv_dev->dev_ver > DEV_VER_V2) + writel(tdl, &priv_dev->regs->ep_tdl); + + priv_ep->last_stream_id = priv_req->request.stream_id; + writel(EP_CMD_DRDY, &priv_dev->regs->ep_cmd); + writel(EP_CMD_ERDY_SID(priv_req->request.stream_id) | + EP_CMD_ERDY, &priv_dev->regs->ep_cmd); + + trace_cdns3_doorbell_epx(priv_ep->name, + readl(&priv_dev->regs->ep_traddr)); + } + + /* WORKAROUND for transition to L0 */ + __cdns3_gadget_wakeup(priv_dev); + + return 0; +} + /** * cdns3_ep_run_transfer - start transfer on no-default endpoint hardware * @priv_ep: endpoint object * * Returns zero on success or negative value on failure */ -int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, - struct usb_request *request) +static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, + struct usb_request *request) { struct cdns3_device *priv_dev = priv_ep->cdns3_dev; struct cdns3_request *priv_req; @@ -826,6 +1096,7 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, int address; u32 control; int pcs; + u16 total_tdl = 0; if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) num_trb = priv_ep->interval; @@ -910,6 +1181,9 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, if (likely(priv_dev->dev_ver >= DEV_VER_V2)) td_size = DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); + else if (priv_ep->flags & EP_TDLCHK_EN) + total_tdl += DIV_ROUND_UP(length, + priv_ep->endpoint.maxpacket); trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) | TRB_LEN(length); @@ -954,6 +1228,23 @@ int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, if (sg_iter == 1) trb->control |= TRB_IOC | TRB_ISP; + if (priv_dev->dev_ver < DEV_VER_V2 && + (priv_ep->flags & EP_TDLCHK_EN)) { + u16 tdl = total_tdl; + u16 old_tdl = EP_CMD_TDL_GET(readl(&priv_dev->regs->ep_cmd)); + + if (tdl > EP_CMD_TDL_MAX) { + tdl = EP_CMD_TDL_MAX; + priv_ep->pending_tdl = total_tdl - EP_CMD_TDL_MAX; + } + + if (old_tdl < tdl) { + tdl -= old_tdl; + writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, + &priv_dev->regs->ep_cmd); + } + } + /* * Memory barrier - cycle bit must be set before other filds in trb. */ @@ -1153,29 +1444,56 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, cdns3_move_deq_to_next_trb(priv_req); } - /* Re-select endpoint. It could be changed by other CPU during - * handling usb_gadget_giveback_request. - */ - cdns3_select_ep(priv_dev, priv_ep->endpoint.address); + if (!request->stream_id) { + /* Re-select endpoint. It could be changed by other CPU + * during handling usb_gadget_giveback_request. 
+ */ + cdns3_select_ep(priv_dev, priv_ep->endpoint.address); - if (!cdns3_request_handled(priv_ep, priv_req)) - goto prepare_next_td; + if (!cdns3_request_handled(priv_ep, priv_req)) + goto prepare_next_td; - trb = priv_ep->trb_pool + priv_ep->dequeue; - trace_cdns3_complete_trb(priv_ep, trb); + trb = priv_ep->trb_pool + priv_ep->dequeue; + trace_cdns3_complete_trb(priv_ep, trb); + + if (trb != priv_req->trb) + dev_warn(priv_dev->dev, + "request_trb=0x%p, queue_trb=0x%p\n", + priv_req->trb, trb); + + request->actual = TRB_LEN(le32_to_cpu(trb->length)); + cdns3_move_deq_to_next_trb(priv_req); + cdns3_gadget_giveback(priv_ep, priv_req, 0); + + if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && + TRBS_PER_SEGMENT == 2) + break; + } else { + /* Re-select endpoint. It could be changed by other CPU + * during handling usb_gadget_giveback_request. + */ + cdns3_select_ep(priv_dev, priv_ep->endpoint.address); + + trb = priv_ep->trb_pool; + trace_cdns3_complete_trb(priv_ep, trb); - if (trb != priv_req->trb) - dev_warn(priv_dev->dev, - "request_trb=0x%p, queue_trb=0x%p\n", - priv_req->trb, trb); + if (trb != priv_req->trb) + dev_warn(priv_dev->dev, + "request_trb=0x%p, queue_trb=0x%p\n", + priv_req->trb, trb); - request->actual = TRB_LEN(le32_to_cpu(trb->length)); - cdns3_move_deq_to_next_trb(priv_req); - cdns3_gadget_giveback(priv_ep, priv_req, 0); + request->actual += TRB_LEN(le32_to_cpu(trb->length)); - if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && - TRBS_PER_SEGMENT == 2) + if (!request->num_sgs || + (request->num_sgs == (priv_ep->stream_sg_idx + 1))) { + priv_ep->stream_sg_idx = 0; + cdns3_gadget_giveback(priv_ep, priv_req, 0); + } else { + priv_ep->stream_sg_idx++; + cdns3_ep_run_stream_transfer(priv_ep, request); + } break; + } } priv_ep->flags &= ~EP_PENDING_REQUEST; @@ -1205,6 +1523,21 @@ void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm) } } +static void cdns3_reprogram_tdl(struct cdns3_endpoint *priv_ep) +{ + u16 tdl = priv_ep->pending_tdl; + struct cdns3_device *priv_dev = priv_ep->cdns3_dev; + + if (tdl > EP_CMD_TDL_MAX) { + tdl = EP_CMD_TDL_MAX; + priv_ep->pending_tdl -= EP_CMD_TDL_MAX; + } else { + priv_ep->pending_tdl = 0; + } + + writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL, &priv_dev->regs->ep_cmd); +} + /** * cdns3_check_ep_interrupt_proceed - Processes interrupt related to endpoint * @priv_ep: endpoint object @@ -1215,6 +1548,9 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) { struct cdns3_device *priv_dev = priv_ep->cdns3_dev; u32 ep_sts_reg; + struct usb_request *deferred_request; + struct usb_request *pending_request; + u32 tdl = 0; cdns3_select_ep(priv_dev, priv_ep->endpoint.address); @@ -1223,6 +1559,36 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) ep_sts_reg = readl(&priv_dev->regs->ep_sts); writel(ep_sts_reg, &priv_dev->regs->ep_sts); + if ((ep_sts_reg & EP_STS_PRIME) && priv_ep->use_streams) { + bool dbusy = !!(ep_sts_reg & EP_STS_DBUSY); + + tdl = cdns3_get_tdl(priv_dev); + + /* + * Continue the previous transfer: + * There is some racing between ERDY and PRIME. The device send + * ERDY and almost in the same time Host send PRIME. It cause + * that host ignore the ERDY packet and driver has to send it + * again. 
+ */ + if (tdl && (dbusy | !EP_STS_BUFFEMPTY(ep_sts_reg) | + EP_STS_HOSTPP(ep_sts_reg))) { + writel(EP_CMD_ERDY | + EP_CMD_ERDY_SID(priv_ep->last_stream_id), + &priv_dev->regs->ep_cmd); + ep_sts_reg &= ~(EP_STS_MD_EXIT | EP_STS_IOC); + } else { + priv_ep->prime_flag = true; + + pending_request = cdns3_next_request(&priv_ep->pending_req_list); + deferred_request = cdns3_next_request(&priv_ep->deferred_req_list); + + if (deferred_request && !pending_request) { + cdns3_start_all_request(priv_dev, priv_ep); + } + } + } + if (ep_sts_reg & EP_STS_TRBERR) { if (priv_ep->flags & EP_STALL_PENDING && !(ep_sts_reg & EP_STS_DESCMIS && @@ -1259,7 +1625,8 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) } } - if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP)) { + if ((ep_sts_reg & EP_STS_IOC) || (ep_sts_reg & EP_STS_ISP) || + (ep_sts_reg & EP_STS_IOT)) { if (priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN) { if (ep_sts_reg & EP_STS_ISP) priv_ep->flags |= EP_QUIRK_END_TRANSFER; @@ -1267,6 +1634,29 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) priv_ep->flags &= ~EP_QUIRK_END_TRANSFER; } + if (!priv_ep->use_streams) { + if ((ep_sts_reg & EP_STS_IOC) || + (ep_sts_reg & EP_STS_ISP)) { + cdns3_transfer_completed(priv_dev, priv_ep); + } else if ((priv_ep->flags & EP_TDLCHK_EN) & + priv_ep->pending_tdl) { + /* handle IOT with pending tdl */ + cdns3_reprogram_tdl(priv_ep); + } + } else if (priv_ep->dir == USB_DIR_OUT) { + priv_ep->ep_sts_pending |= ep_sts_reg; + } else if (ep_sts_reg & EP_STS_IOT) { + cdns3_transfer_completed(priv_dev, priv_ep); + } + } + + /* + * MD_EXIT interrupt sets when stream capable endpoint exits + * from MOVE DATA state of Bulk IN/OUT stream protocol state machine + */ + if (priv_ep->dir == USB_DIR_OUT && (ep_sts_reg & EP_STS_MD_EXIT) && + (priv_ep->ep_sts_pending & EP_STS_IOT) && priv_ep->use_streams) { + priv_ep->ep_sts_pending = 0; cdns3_transfer_completed(priv_dev, priv_ep); } @@ -1274,7 +1664,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) * WA2: this condition should only be meet when * priv_ep->flags & EP_QUIRK_EXTRA_BUF_DET or * priv_ep->flags & EP_QUIRK_EXTRA_BUF_EN. - * In other cases this interrupt will be disabled/ + * In other cases this interrupt will be disabled. */ if (ep_sts_reg & EP_STS_DESCMIS && priv_dev->dev_ver < DEV_VER_V2 && !(priv_ep->flags & EP_STALLED)) @@ -1457,6 +1847,9 @@ static irqreturn_t cdns3_device_thread_irq_handler(int irq, void *data) ret = IRQ_HANDLED; } + if (priv_dev->dev_ver < DEV_VER_V2 && priv_dev->using_streams) + cdns3_wa2_check_outq_status(priv_dev); + irqend: writel(~0, &priv_dev->regs->ep_ien); spin_unlock_irqrestore(&priv_dev->lock, flags); @@ -1511,6 +1904,27 @@ static int cdns3_ep_onchip_buffer_reserve(struct cdns3_device *priv_dev, return 0; } +void cdns3_stream_ep_reconfig(struct cdns3_device *priv_dev, + struct cdns3_endpoint *priv_ep) +{ + if (!priv_ep->use_streams || priv_dev->gadget.speed < USB_SPEED_SUPER) + return; + + if (priv_dev->dev_ver >= DEV_VER_V3) { + u32 mask = BIT(priv_ep->num + (priv_ep->dir ? 16 : 0)); + + /* + * Stream capable endpoints are handled by using ep_tdl + * register. Other endpoints use TDL from TRB feature. 
+ */ + cdns3_clear_register_bit(&priv_dev->regs->tdl_from_trb, mask); + } + + /* Enable Stream Bit TDL chk and SID chk */ + cdns3_set_register_bit(&priv_dev->regs->ep_cfg, EP_CFG_STREAM_EN | + EP_CFG_TDL_CHK | EP_CFG_SID_CHK); +} + void cdns3_configure_dmult(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep) { @@ -1772,6 +2186,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep, { struct cdns3_endpoint *priv_ep; struct cdns3_device *priv_dev; + const struct usb_ss_ep_comp_descriptor *comp_desc; u32 reg = EP_STS_EN_TRBERREN; u32 bEndpointAddress; unsigned long flags; @@ -1781,6 +2196,7 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep, priv_ep = ep_to_cdns3_ep(ep); priv_dev = priv_ep->cdns3_dev; + comp_desc = priv_ep->endpoint.comp_desc; if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) { dev_dbg(priv_dev->dev, "usbss: invalid parameters\n"); @@ -1811,6 +2227,24 @@ static int cdns3_gadget_ep_enable(struct usb_ep *ep, goto exit; } + bEndpointAddress = priv_ep->num | priv_ep->dir; + cdns3_select_ep(priv_dev, bEndpointAddress); + + if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) { + /* + * Enable stream support (SS mode) related interrupts + * in EP_STS_EN Register + */ + if (priv_dev->gadget.speed >= USB_SPEED_SUPER) { + reg |= EP_STS_EN_IOTEN | EP_STS_EN_PRIMEEEN | + EP_STS_EN_SIDERREN | EP_STS_EN_MD_EXITEN | + EP_STS_EN_STREAMREN; + priv_ep->use_streams = true; + cdns3_stream_ep_reconfig(priv_dev, priv_ep); + priv_dev->using_streams |= true; + } + } + ret = cdns3_allocate_trb_pool(priv_ep); if (ret) @@ -1957,6 +2391,7 @@ static int cdns3_gadget_ep_disable(struct usb_ep *ep) ep->desc = NULL; priv_ep->flags &= ~EP_ENABLED; + priv_ep->use_streams = false; spin_unlock_irqrestore(&priv_dev->lock, flags); @@ -2005,13 +2440,21 @@ static int __cdns3_gadget_ep_queue(struct usb_ep *ep, list_add_tail(&request->list, &priv_ep->deferred_req_list); /* + * For stream capable endpoint if prime irq flag is set then only start + * request. * If hardware endpoint configuration has not been set yet then * just queue request in deferred list. Transfer will be started in * cdns3_set_hw_configuration. */ - if (priv_dev->hw_configured_flag && !(priv_ep->flags & EP_STALLED) && - !(priv_ep->flags & EP_STALL_PENDING)) - cdns3_start_all_request(priv_dev, priv_ep); + if (!request->stream_id) { + if (priv_dev->hw_configured_flag && + !(priv_ep->flags & EP_STALLED) && + !(priv_ep->flags & EP_STALL_PENDING)) + cdns3_start_all_request(priv_dev, priv_ep); + } else { + if (priv_dev->hw_configured_flag && priv_ep->prime_flag) + cdns3_start_all_request(priv_dev, priv_ep); + } return 0; } diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h index bc4024041ef2..f003a7801872 100644 --- a/drivers/usb/cdns3/gadget.h +++ b/drivers/usb/cdns3/gadget.h @@ -599,6 +599,7 @@ struct cdns3_usb_regs { #define EP_CMD_TDL_MASK GENMASK(15, 9) #define EP_CMD_TDL_SET(p) (((p) << 9) & EP_CMD_TDL_MASK) #define EP_CMD_TDL_GET(p) (((p) & EP_CMD_TDL_MASK) >> 9) +#define EP_CMD_TDL_MAX (EP_CMD_TDL_MASK >> 9) /* ERDY Stream ID value (used in SS mode). */ #define EP_CMD_ERDY_SID_MASK GENMASK(31, 16) @@ -969,10 +970,18 @@ struct cdns3_usb_regs { #define ISO_MAX_INTERVAL 10 +#define MAX_TRB_LENGTH BIT(16) + #if TRBS_PER_SEGMENT < 2 #error "Incorrect TRBS_PER_SEGMENT. Minimal Transfer Ring size is 2." #endif +#define TRBS_PER_STREAM_SEGMENT 2 + +#if TRBS_PER_STREAM_SEGMENT < 2 +#error "Incorrect TRBS_PER_STREAMS_SEGMENT. Minimal Transfer Ring size is 2." 
+#endif + /* *Only for ISOC endpoints - maximum number of TRBs is calculated as * pow(2, bInterval-1) * number of usb requests. It is limitation made by @@ -1000,6 +1009,7 @@ struct cdns3_trb { #define TRB_SIZE (sizeof(struct cdns3_trb)) #define TRB_RING_SIZE (TRB_SIZE * TRBS_PER_SEGMENT) +#define TRB_STREAM_RING_SIZE (TRB_SIZE * TRBS_PER_STREAM_SEGMENT) #define TRB_ISO_RING_SIZE (TRB_SIZE * TRBS_PER_ISOC_SEGMENT) #define TRB_CTRL_RING_SIZE (TRB_SIZE * 2) @@ -1078,7 +1088,7 @@ struct cdns3_trb { #define CDNS3_ENDPOINTS_MAX_COUNT 32 #define CDNS3_EP_ZLP_BUF_SIZE 1024 -#define CDNS3_EP_BUF_SIZE 2 /* KB */ +#define CDNS3_EP_BUF_SIZE 4 /* KB */ #define CDNS3_EP_ISO_HS_MULT 3 #define CDNS3_EP_ISO_SS_BURST 3 #define CDNS3_MAX_NUM_DESCMISS_BUF 32 @@ -1109,6 +1119,7 @@ struct cdns3_device; * @interval: interval between packets used for ISOC endpoint. * @free_trbs: number of free TRBs in transfer ring * @num_trbs: number of all TRBs in transfer ring + * @alloc_ring_size: size of the allocated TRB ring * @pcs: producer cycle state * @ccs: consumer cycle state * @enqueue: enqueue index in transfer ring @@ -1142,6 +1153,7 @@ struct cdns3_endpoint { #define EP_QUIRK_END_TRANSFER BIT(11) #define EP_QUIRK_EXTRA_BUF_DET BIT(12) #define EP_QUIRK_EXTRA_BUF_EN BIT(13) +#define EP_TDLCHK_EN BIT(15) u32 flags; struct cdns3_request *descmis_req; @@ -1153,6 +1165,7 @@ struct cdns3_endpoint { int free_trbs; int num_trbs; + int alloc_ring_size; u8 pcs; u8 ccs; int enqueue; @@ -1163,6 +1176,14 @@ struct cdns3_endpoint { struct cdns3_trb *wa1_trb; unsigned int wa1_trb_index; unsigned int wa1_cycle_bit:1; + + /* Stream related */ + unsigned int use_streams:1; + unsigned int prime_flag:1; + u32 ep_sts_pending; + u16 last_stream_id; + u16 pending_tdl; + unsigned int stream_sg_idx; }; /** @@ -1290,6 +1311,7 @@ struct cdns3_device { int hw_configured_flag:1; int wake_up_flag:1; unsigned status_completion_no_call:1; + unsigned using_streams:1; int out_mem_is_allocated; struct work_struct pending_status_wq; @@ -1310,8 +1332,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev); void cdns3_select_ep(struct cdns3_device *priv_dev, u32 ep); void cdns3_allow_enable_l1(struct cdns3_device *priv_dev, int enable); struct usb_request *cdns3_next_request(struct list_head *list); -int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, - struct usb_request *request); void cdns3_rearm_transfer(struct cdns3_endpoint *priv_ep, u8 rearm); int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep); u8 cdns3_ep_addr_to_index(u8 ep_addr); diff --git a/drivers/usb/cdns3/trace.h b/drivers/usb/cdns3/trace.h index e92348c9b4d7..8d121e207fd8 100644 --- a/drivers/usb/cdns3/trace.h +++ b/drivers/usb/cdns3/trace.h @@ -122,18 +122,24 @@ DECLARE_EVENT_CLASS(cdns3_log_epx_irq, __string(ep_name, priv_ep->name) __field(u32, ep_sts) __field(u32, ep_traddr) + __field(u32, ep_last_sid) + __field(u32, use_streams) __dynamic_array(char, str, CDNS3_MSG_MAX) ), TP_fast_assign( __assign_str(ep_name, priv_ep->name); __entry->ep_sts = readl(&priv_dev->regs->ep_sts); __entry->ep_traddr = readl(&priv_dev->regs->ep_traddr); + __entry->ep_last_sid = priv_ep->last_stream_id; + __entry->use_streams = priv_ep->use_streams; ), - TP_printk("%s, ep_traddr: %08x", + TP_printk("%s, ep_traddr: %08x ep_last_sid: %08x use_streams: %d", cdns3_decode_epx_irq(__get_str(str), __get_str(ep_name), __entry->ep_sts), - __entry->ep_traddr) + __entry->ep_traddr, + __entry->ep_last_sid, + __entry->use_streams) ); DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq, @@ -210,6 
+216,7 @@ DECLARE_EVENT_CLASS(cdns3_log_request, __field(int, end_trb) __field(struct cdns3_trb *, start_trb_addr) __field(int, flags) + __field(unsigned int, stream_id) ), TP_fast_assign( __assign_str(name, req->priv_ep->name); @@ -225,9 +232,10 @@ DECLARE_EVENT_CLASS(cdns3_log_request, __entry->end_trb = req->end_trb; __entry->start_trb_addr = req->trb; __entry->flags = req->flags; + __entry->stream_id = req->request.stream_id; ), TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d," - " trb: [start:%d, end:%d: virt addr %pa], flags:%x ", + " trb: [start:%d, end:%d: virt addr %pa], flags:%x SID: %u", __get_str(name), __entry->req, __entry->buf, __entry->actual, __entry->length, __entry->zero ? "Z" : "z", @@ -237,7 +245,8 @@ DECLARE_EVENT_CLASS(cdns3_log_request, __entry->start_trb, __entry->end_trb, __entry->start_trb_addr, - __entry->flags + __entry->flags, + __entry->stream_id ) ); @@ -281,6 +290,39 @@ TRACE_EVENT(cdns3_ep0_queue, __entry->length) ); +DECLARE_EVENT_CLASS(cdns3_stream_split_transfer_len, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req), + TP_STRUCT__entry( + __string(name, req->priv_ep->name) + __field(struct cdns3_request *, req) + __field(unsigned int, length) + __field(unsigned int, actual) + __field(unsigned int, stream_id) + ), + TP_fast_assign( + __assign_str(name, req->priv_ep->name); + __entry->req = req; + __entry->actual = req->request.length; + __entry->length = req->request.actual; + __entry->stream_id = req->request.stream_id; + ), + TP_printk("%s: req: %p,request length: %u actual length: %u SID: %u", + __get_str(name), __entry->req, __entry->length, + __entry->actual, __entry->stream_id) +); + +DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + +DEFINE_EVENT(cdns3_stream_split_transfer_len, + cdns3_stream_transfer_split_next_part, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + DECLARE_EVENT_CLASS(cdns3_log_aligned_request, TP_PROTO(struct cdns3_request *priv_req), TP_ARGS(priv_req), @@ -319,6 +361,34 @@ DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request, TP_ARGS(req) ); +DECLARE_EVENT_CLASS(cdns3_log_map_request, + TP_PROTO(struct cdns3_request *priv_req), + TP_ARGS(priv_req), + TP_STRUCT__entry( + __string(name, priv_req->priv_ep->name) + __field(struct usb_request *, req) + __field(void *, buf) + __field(dma_addr_t, dma) + ), + TP_fast_assign( + __assign_str(name, priv_req->priv_ep->name); + __entry->req = &priv_req->request; + __entry->buf = priv_req->request.buf; + __entry->dma = priv_req->request.dma; + ), + TP_printk("%s: req: %p, req buf %p, dma %p", + __get_str(name), __entry->req, __entry->buf, &__entry->dma + ) +); +DEFINE_EVENT(cdns3_log_map_request, cdns3_map_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); +DEFINE_EVENT(cdns3_log_map_request, cdns3_mapped_request, + TP_PROTO(struct cdns3_request *req), + TP_ARGS(req) +); + DECLARE_EVENT_CLASS(cdns3_log_trb, TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), TP_ARGS(priv_ep, trb), @@ -329,6 +399,7 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, __field(u32, length) __field(u32, control) __field(u32, type) + __field(unsigned int, last_stream_id) ), TP_fast_assign( __assign_str(name, priv_ep->name); @@ -337,8 +408,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, __entry->length = trb->length; __entry->control = trb->control; __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); + __entry->last_stream_id = priv_ep->last_stream_id; ), - 
TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s)", + TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u", __get_str(name), __entry->trb, __entry->buffer, TRB_LEN(__entry->length), (u8)TRB_BURST_LEN_GET(__entry->length), @@ -349,7 +421,9 @@ DECLARE_EVENT_CLASS(cdns3_log_trb, __entry->control & TRB_FIFO_MODE ? "FIFO, " : "", __entry->control & TRB_CHAIN ? "CHAIN, " : "", __entry->control & TRB_IOC ? "IOC, " : "", - TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK" + TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK", + TRB_FIELD_TO_STREAMID(__entry->control), + __entry->last_stream_id ) ); @@ -398,6 +472,7 @@ DECLARE_EVENT_CLASS(cdns3_log_ep, __field(unsigned int, maxpacket) __field(unsigned int, maxpacket_limit) __field(unsigned int, max_streams) + __field(unsigned int, use_streams) __field(unsigned int, maxburst) __field(unsigned int, flags) __field(unsigned int, dir) @@ -409,16 +484,18 @@ DECLARE_EVENT_CLASS(cdns3_log_ep, __entry->maxpacket = priv_ep->endpoint.maxpacket; __entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit; __entry->max_streams = priv_ep->endpoint.max_streams; + __entry->use_streams = priv_ep->use_streams; __entry->maxburst = priv_ep->endpoint.maxburst; __entry->flags = priv_ep->flags; __entry->dir = priv_ep->dir; __entry->enqueue = priv_ep->enqueue; __entry->dequeue = priv_ep->dequeue; ), - TP_printk("%s: mps: %d/%d. streams: %d, burst: %d, enq idx: %d, " - "deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s", + TP_printk("%s: mps: %d/%d. streams: %d, stream enable: %d, burst: %d, " + "enq idx: %d, deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s", __get_str(name), __entry->maxpacket, __entry->maxpacket_limit, __entry->max_streams, + __entry->use_streams, __entry->maxburst, __entry->enqueue, __entry->dequeue, __entry->flags & EP_ENABLED ? "EN | " : "", -- 2.20.1
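
Reference sketch: the version-dependent TDL programming described in the
commit message reduces to roughly the following. This is a condensed,
illustrative sketch that reuses the register and macro names introduced by
this patch (DEV_VER_V2, TRB_TDL_SS_SIZE, EP_CMD_TDL_SET, EP_CMD_STDL,
ep_cmd, ep_tdl); the wrapper function itself is hypothetical and not part
of the driver.

/*
 * Illustrative only: how a computed TDL value reaches the hardware
 * depending on the controller version (assumes the cdns3 driver types
 * and register layout from gadget.h).
 */
static void sketch_write_tdl(struct cdns3_device *priv_dev,
			     struct cdns3_trb *trb, u32 tdl)
{
	if (priv_dev->dev_ver == DEV_VER_V2) {
		/*
		 * DEV_VER_V2: USB_CONF2_EN_TDL_TRB is enabled in the DMULT
		 * configuration, so the TDL is carried in the TRB itself
		 * (SuperSpeed only).
		 */
		trb->length |= TRB_TDL_SS_SIZE(tdl);
	} else if (priv_dev->dev_ver < DEV_VER_V2) {
		/* Older controllers: TDL and STDL go through ep_cmd. */
		writel(EP_CMD_TDL_SET(tdl) | EP_CMD_STDL,
		       &priv_dev->regs->ep_cmd);
	} else {
		/* Newer than DEV_VER_V2: dedicated ep_tdl register. */
		writel(tdl, &priv_dev->regs->ep_tdl);
	}
}

In cdns3_ep_run_stream_transfer() the same decision is made inline: the TRB
field is filled in while the TRB is being prepared, whereas the ep_cmd/ep_tdl
writes happen just before the DRDY/ERDY doorbell is rung.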