Re: [PATCH] xhci: Don't add NOP TRB for aligned bulk sg transfers

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



It helps if you Cc testers on your patches.  Adding Jérôme, please test.

Sarah Sharp

On Thu, Jan 23, 2014 at 01:42:28PM +0000, David Laight wrote:
> Transfer requests for usb disks can contain a large number of 4k fragments.
> Assume that it doesn't matter if there are LINK TRB in fragment lists
> that are 1k aligned.
> 
> This should stop errors when transfer requests for usb disks contain
> more fragments than will fit in a ring segment.
> 
> Signed-off-by: David Laight <david.laight@xxxxxxxxxx>
> ---
> Note that this patch just allows large numbers of aligned fragments for
> bulk scatter-gather transfers.
> It doesn't remove the need for the patch that changes the way the ownership
> of the first TRB is set in order to avoid the controller seeing 'dangling'
> LINK TRBs.
> However it does reduce the likelihood of dangling LINK TRBs, so might seem to
> make the other patch unnecessary.
> 
> I've only compile tested it - but it should be fine.
> 
>  drivers/usb/host/xhci-ring.c | 49 ++++++++++++++++++++++++++++++++++----------
>  1 file changed, 38 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
> index a0b248c..7a4efd2 100644
> --- a/drivers/usb/host/xhci-ring.c
> +++ b/drivers/usb/host/xhci-ring.c
> @@ -2932,7 +2932,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
>   * FIXME allocate segments if the ring is full.
>   */
>  static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
> -		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
> +		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags,
> +		bool aligned_xfer)
>  {
>  	unsigned int num_trbs_needed;
>  
> @@ -2980,6 +2981,9 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
>  			 * Simplest solution is to fill the trb before the
>  			 * LINK with nop commands.
>  			 */
> +			if (aligned_xfer)
> +				/* Caller says buffer is aligned */
> +				break;
>  			if (num_trbs == 1 || num_trbs <= usable || usable == 0)
>  				break;
>  
> @@ -3073,7 +3077,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
>  		unsigned int num_trbs,
>  		struct urb *urb,
>  		unsigned int td_index,
> -		gfp_t mem_flags)
> +		gfp_t mem_flags,
> +		bool aligned_xfer)
>  {
>  	int ret;
>  	struct urb_priv *urb_priv;
> @@ -3090,7 +3095,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
>  
>  	ret = prepare_ring(xhci, ep_ring,
>  			   le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
> -			   num_trbs, mem_flags);
> +			   num_trbs, mem_flags, aligned_xfer);
>  	if (ret)
>  		return ret;
>  
> @@ -3117,7 +3122,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
>  	return 0;
>  }
>  
> -static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
> +static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb,
> +	bool *aligned_xfer)
>  {
>  	int num_sgs, num_trbs, running_total, temp, i;
>  	struct scatterlist *sg;
> @@ -3125,11 +3131,30 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
>  	sg = NULL;
>  	num_sgs = urb->num_mapped_sgs;
>  	temp = urb->transfer_buffer_length;
> +	*aligned_xfer = true;
>  
>  	num_trbs = 0;
>  	for_each_sg(urb->sg, sg, num_sgs, i) {
>  		unsigned int len = sg_dma_len(sg);
>  
> +		/*
> +		 * The 1.0 spec says LINK TRB are only allowed at data offsets
> +		 * that are multiples of the transfer burst size.
> +		 * While the burst size might be 16k the effect is probably
> +		 * that the transfer is split. In particular a non-full-length
> +		 * data block might be sent - terminating a bulk transfer.
> +		 *
> +		 * prepare_ring() ensures that the data TRB don't cross a LINK,
> +		 * but for usb disks we see large numbers of fragments of 4k
> +		 * and that exceeds the size of each ring segment.
> +		 *
> +		 * If all the fragments start and end on 1k boundaries tell
> +		 * prepare_ring() not to worry about LINK TRB.
> +		 */
> +		if ((sg_dma_address(sg) | sg_dma_len(sg)) & 1023)
> +			/* Fragment not 1k aligned */
> +			*aligned_xfer = false;
> +
>  		/* Scatter gather list entries may cross 64KB boundaries */
>  		running_total = TRB_MAX_BUFF_SIZE -
>  			(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
> @@ -3284,6 +3309,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
>  	bool first_trb;
>  	u64 addr;
>  	bool more_trbs_coming;
> +	bool aligned_xfer;
>  
>  	struct xhci_generic_trb *start_trb;
>  	int start_cycle;
> @@ -3292,14 +3318,14 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
>  	if (!ep_ring)
>  		return -EINVAL;
>  
> -	num_trbs = count_sg_trbs_needed(xhci, urb);
> +	num_trbs = count_sg_trbs_needed(xhci, urb, &aligned_xfer);
>  	num_sgs = urb->num_mapped_sgs;
>  	total_packet_count = DIV_ROUND_UP(urb->transfer_buffer_length,
>  			usb_endpoint_maxp(&urb->ep->desc));
>  
>  	trb_buff_len = prepare_transfer(xhci, xhci->devs[slot_id],
>  			ep_index, urb->stream_id,
> -			num_trbs, urb, 0, mem_flags);
> +			num_trbs, urb, 0, mem_flags, aligned_xfer);
>  	if (trb_buff_len < 0)
>  		return trb_buff_len;
>  
> @@ -3470,7 +3496,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
>  
>  	ret = prepare_transfer(xhci, xhci->devs[slot_id],
>  			ep_index, urb->stream_id,
> -			num_trbs, urb, 0, mem_flags);
> +			num_trbs, urb, 0, mem_flags, false);
>  	if (ret < 0)
>  		return ret;
>  
> @@ -3600,7 +3626,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
>  		num_trbs++;
>  	ret = prepare_transfer(xhci, xhci->devs[slot_id],
>  			ep_index, urb->stream_id,
> -			num_trbs, urb, 0, mem_flags);
> +			num_trbs, urb, 0, mem_flags, false);
>  	if (ret < 0)
>  		return ret;
>  
> @@ -3810,7 +3836,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
>  		trbs_per_td = count_isoc_trbs_needed(xhci, urb, i);
>  
>  		ret = prepare_transfer(xhci, xhci->devs[slot_id], ep_index,
> -				urb->stream_id, trbs_per_td, urb, i, mem_flags);
> +				urb->stream_id, trbs_per_td, urb, i, mem_flags,
> +				false);
>  		if (ret < 0) {
>  			if (i == 0)
>  				return ret;
> @@ -3969,7 +3996,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
>  	 * Do not insert any td of the urb to the ring if the check failed.
>  	 */
>  	ret = prepare_ring(xhci, ep_ring, le32_to_cpu(ep_ctx->ep_info) & EP_STATE_MASK,
> -			   num_trbs, mem_flags);
> +			   num_trbs, mem_flags, false);
>  	if (ret)
>  		return ret;
>  
> @@ -4026,7 +4053,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
>  		reserved_trbs++;
>  
>  	ret = prepare_ring(xhci, xhci->cmd_ring, EP_STATE_RUNNING,
> -			reserved_trbs, GFP_ATOMIC);
> +			reserved_trbs, GFP_ATOMIC, false);
>  	if (ret < 0) {
>  		xhci_err(xhci, "ERR: No room for command on command ring\n");
>  		if (command_must_succeed)
> -- 
> 1.8.1.2
> 
> 
> 
--
To unsubscribe from this list: send the line "unsubscribe linux-usb" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html




[Index of Archives]     [Linux Media]     [Linux Input]     [Linux Audio Users]     [Yosemite News]     [Linux Kernel]     [Linux SCSI]     [Old Linux USB Devel Archive]

  Powered by Linux