[PATCH] usb: wusbcore: add isochronous IN support to HWA

This patch adds support for isochronous IN transfers to the HWA driver.  
The changes include removing the checks that return errors for isoc IN 
URBs and adding functionality to read the isoc data returned from the 
HWA.

Signed-off-by: Thomas Pugliese <thomas.pugliese@xxxxxxxxx>
---
 drivers/usb/wusbcore/wa-xfer.c |  305 +++++++++++++++++++++++++++-------------
 1 file changed, 207 insertions(+), 98 deletions(-)
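
As a quick illustration of the new IN-side handling (not part of the commit
message): the isoc IN path reads one frame per buf_in_urb submission, so
wa_buf_in_cb() has to locate the next frame with a non-zero actual_length and
decide whether the DTI URB can be resubmitted yet.  The snippet below is a
standalone sketch of that frame walk in plain C; the helper name
next_isoc_data_frames() and the iso_frame type are simplified stand-ins for
illustration only, not kernel code.

/*
 * Standalone sketch of the wa_buf_in_cb() frame walk (simplified types,
 * not the actual kernel code).  Starting just after the frame that was
 * read, find the next isoc frame with data and stop counting once we
 * know whether 0, 1, or more than 1 data frames remain.
 */
#include <stdio.h>

struct iso_frame {			/* stand-in for usb_iso_packet_descriptor */
	unsigned int actual_length;
};

static unsigned int next_isoc_data_frames(const struct iso_frame *frame,
					  unsigned int frame_count,
					  unsigned int *isoc_frame_index)
{
	unsigned int seg_index = *isoc_frame_index + 1;
	unsigned int isoc_data_frame_count = 0;

	while (seg_index < frame_count && isoc_data_frame_count <= 1) {
		if (frame[seg_index].actual_length > 0) {
			/* remember the first remaining frame with data. */
			if (!isoc_data_frame_count)
				*isoc_frame_index = seg_index;
			++isoc_data_frame_count;
		}
		++seg_index;
	}
	return isoc_data_frame_count;
}

int main(void)
{
	struct iso_frame frames[] = { {512}, {0}, {256}, {0}, {128} };
	unsigned int idx = 0;		/* frame 0 was just read */
	unsigned int remaining = next_isoc_data_frames(frames, 5, &idx);

	/*
	 * remaining == 2 and idx == 2 here: another buf_in read is needed
	 * for frame 2, so the DTI URB resubmit stays deferred (dti_busy).
	 * Once only one data frame remains, the deferred DTI URB is
	 * resubmitted from wa_buf_in_cb().
	 */
	printf("remaining=%u, next frame=%u\n", remaining, idx);
	return 0;
}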

diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
index a70e142..2afa886 100644
--- a/drivers/usb/wusbcore/wa-xfer.c
+++ b/drivers/usb/wusbcore/wa-xfer.c
@@ -488,13 +488,14 @@ static int __wa_seg_calculate_isoc_frame_count(struct wa_xfer *xfer,
 		&& ((segment_size + iso_frame_desc[index].length)
 				<= xfer->seg_size)) {
 		/*
-		 * For Alereon HWA devices, only include an isoc frame in a
-		 * segment if it is physically contiguous with the previous
+		 * For Alereon HWA devices, only include an isoc frame in an
+		 * out segment if it is physically contiguous with the previous
 		 * frame.  This is required because those devices expect
 		 * the isoc frames to be sent as a single USB transaction as
 		 * opposed to one transaction per frame with standard HWA.
 		 */
 		if ((xfer->wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC)
+			&& (xfer->is_inbound == 0)
 			&& (index > isoc_frame_offset)
 			&& ((iso_frame_desc[index - 1].offset +
 				iso_frame_desc[index - 1].length) !=
@@ -537,14 +538,8 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 		result = sizeof(struct wa_xfer_bi);
 		break;
 	case USB_ENDPOINT_XFER_ISOC:
-		if (usb_pipeout(urb->pipe)) {
-			*pxfer_type = WA_XFER_TYPE_ISO;
-			result = sizeof(struct wa_xfer_hwaiso);
-		} else {
-			dev_err(dev, "FIXME: ISOC IN not implemented\n");
-			result = -ENOSYS;
-			goto error;
-		}
+		*pxfer_type = WA_XFER_TYPE_ISO;
+		result = sizeof(struct wa_xfer_hwaiso);
 		break;
 	default:
 		/* never happens */
@@ -555,10 +550,22 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 	xfer->is_dma = urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? 1 : 0;
 
 	maxpktsize = le16_to_cpu(rpipe->descr.wMaxPacketSize);
+	xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
+		* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
+	/* Compute the segment size and make sure it is a multiple of
+	 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
+	 * a check (FIXME) */
+	if (xfer->seg_size < maxpktsize) {
+		dev_err(dev,
+			"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
+			xfer->seg_size, maxpktsize);
+		result = -EINVAL;
+		goto error;
+	}
+	xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
 	if ((rpipe->descr.bmAttribute & 0x3) == USB_ENDPOINT_XFER_ISOC) {
 		int index = 0;
 
-		xfer->seg_size = maxpktsize;
 		xfer->segs = 0;
 		/*
 		 * loop over urb->number_of_packets to determine how many
@@ -571,19 +578,6 @@ static ssize_t __wa_xfer_setup_sizes(struct wa_xfer *xfer,
 			++xfer->segs;
 		}
 	} else {
-		xfer->seg_size = le16_to_cpu(rpipe->descr.wBlocks)
-			* 1 << (xfer->wa->wa_descr->bRPipeBlockSize - 1);
-		/* Compute the segment size and make sure it is a multiple of
-		 * the maxpktsize (WUSB1.0[8.3.3.1])...not really too much of
-		 * a check (FIXME) */
-		if (xfer->seg_size < maxpktsize) {
-			dev_err(dev,
-				"HW BUG? seg_size %zu smaller than maxpktsize %zu\n",
-				xfer->seg_size, maxpktsize);
-			result = -EINVAL;
-			goto error;
-		}
-		xfer->seg_size = (xfer->seg_size / maxpktsize) * maxpktsize;
 		xfer->segs = DIV_ROUND_UP(urb->transfer_buffer_length,
 						xfer->seg_size);
 		if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL)
@@ -844,7 +838,7 @@ static void wa_seg_iso_pack_desc_cb(struct urb *urb)
 				wa_xfer_id(xfer), seg->index, urb->status);
 		if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS,
 			    EDC_ERROR_TIMEFRAME)){
-			dev_err(dev, "DTO: URB max acceptable errors exceeded, resetting device\n");
+			dev_err(dev, "iso xfer: URB max acceptable errors exceeded, resetting device\n");
 			wa_reset_all(wa);
 		}
 		if (seg->status != WA_SEG_ERROR) {
@@ -1108,7 +1102,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 	const struct usb_endpoint_descriptor *dto_epd = xfer->wa->dto_epd;
 	struct wa_seg *seg;
 	size_t buf_itr, buf_size, buf_itr_size;
-	int xfer_isoc_frame_offset = 0;
+	int isoc_frame_offset = 0;
 
 	result = -ENOMEM;
 	xfer->seg = kcalloc(xfer->segs, sizeof(xfer->seg[0]), GFP_ATOMIC);
@@ -1121,10 +1115,14 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 		size_t iso_pkt_descr_size = 0;
 		int seg_isoc_frame_count = 0, seg_isoc_size = 0;
 
+		/*
+		 * Adjust the size of the segment object to contain space for
+		 * the isoc packet descriptor buffer.
+		 */
 		if (usb_pipeisoc(xfer->urb->pipe)) {
 			seg_isoc_frame_count =
 				__wa_seg_calculate_isoc_frame_count(xfer,
-					xfer_isoc_frame_offset, &seg_isoc_size);
+					isoc_frame_offset, &seg_isoc_size);
 
 			iso_pkt_descr_size =
 				sizeof(struct wa_xfer_packet_info_hwaiso) +
@@ -1137,15 +1135,40 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 		wa_seg_init(seg);
 		seg->xfer = xfer;
 		seg->index = cnt;
-		seg->isoc_frame_count = seg_isoc_frame_count;
-		seg->isoc_frame_offset = xfer_isoc_frame_offset;
-		seg->isoc_size = seg_isoc_size;
 		usb_fill_bulk_urb(&seg->tr_urb, usb_dev,
 				  usb_sndbulkpipe(usb_dev,
 						  dto_epd->bEndpointAddress),
 				  &seg->xfer_hdr, xfer_hdr_size,
 				  wa_seg_tr_cb, seg);
 		buf_itr_size = min(buf_size, xfer->seg_size);
+
+		if (usb_pipeisoc(xfer->urb->pipe)) {
+			seg->isoc_frame_count = seg_isoc_frame_count;
+			seg->isoc_frame_offset = isoc_frame_offset;
+			seg->isoc_size = seg_isoc_size;
+			/* iso packet descriptor. */
+			seg->isoc_pack_desc_urb =
+					usb_alloc_urb(0, GFP_ATOMIC);
+			if (seg->isoc_pack_desc_urb == NULL)
+				goto error_iso_pack_desc_alloc;
+			/*
+			 * The buffer for the isoc packet descriptor starts
+			 * after the transfer request header in the
+			 * segment object memory buffer.
+			 */
+			usb_fill_bulk_urb(
+				seg->isoc_pack_desc_urb, usb_dev,
+				usb_sndbulkpipe(usb_dev,
+					dto_epd->bEndpointAddress),
+				(void *)(&seg->xfer_hdr) +
+					xfer_hdr_size,
+				iso_pkt_descr_size,
+				wa_seg_iso_pack_desc_cb, seg);
+
+			/* adjust starting frame offset for next seg. */
+			isoc_frame_offset += seg_isoc_frame_count;
+		}
+
 		if (xfer->is_inbound == 0 && buf_size > 0) {
 			/* outbound data. */
 			seg->dto_urb = usb_alloc_urb(0, GFP_ATOMIC);
@@ -1158,25 +1181,6 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 				NULL, 0, wa_seg_dto_cb, seg);
 
 			if (usb_pipeisoc(xfer->urb->pipe)) {
-				/* iso packet descriptor. */
-				seg->isoc_pack_desc_urb =
-						usb_alloc_urb(0, GFP_ATOMIC);
-				if (seg->isoc_pack_desc_urb == NULL)
-					goto error_iso_pack_desc_alloc;
-				/*
-				 * The buffer for the isoc packet descriptor
-				 * after the transfer request header in the
-				 * segment object memory buffer.
-				 */
-				usb_fill_bulk_urb(
-					seg->isoc_pack_desc_urb, usb_dev,
-					usb_sndbulkpipe(usb_dev,
-						dto_epd->bEndpointAddress),
-					(void *)(&seg->xfer_hdr) +
-						xfer_hdr_size,
-					iso_pkt_descr_size,
-					wa_seg_iso_pack_desc_cb, seg);
-
 				/*
 				 * Fill in the xfer buffer information for the
 				 * first isoc frame.  Subsequent frames in this
@@ -1184,9 +1188,7 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 				 * DTO completion routine, if needed.
 				 */
 				__wa_populate_dto_urb_isoc(xfer, seg,
-					xfer_isoc_frame_offset);
-				/* adjust starting frame offset for next seg. */
-				xfer_isoc_frame_offset += seg_isoc_frame_count;
+					seg->isoc_frame_offset);
 			} else {
 				/* fill in the xfer buffer information. */
 				result = __wa_populate_dto_urb(xfer, seg,
@@ -1207,10 +1209,11 @@ static int __wa_xfer_setup_segs(struct wa_xfer *xfer, size_t xfer_hdr_size)
 	 * Use the fact that cnt is left at were it failed.  The remaining
 	 * segments will be cleaned up by wa_xfer_destroy.
 	 */
-error_iso_pack_desc_alloc:
 error_seg_outbound_populate:
 	usb_free_urb(xfer->seg[cnt]->dto_urb);
 error_dto_alloc:
+	usb_free_urb(xfer->seg[cnt]->isoc_pack_desc_urb);
+error_iso_pack_desc_alloc:
 	kfree(xfer->seg[cnt]);
 	xfer->seg[cnt] = NULL;
 error_seg_kmalloc:
@@ -1325,8 +1328,6 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
 	}
 	/* submit the isoc packet descriptor if present. */
 	if (seg->isoc_pack_desc_urb) {
-		struct wahc *wa = xfer->wa;
-
 		result = usb_submit_urb(seg->isoc_pack_desc_urb, GFP_ATOMIC);
 		seg->isoc_frame_index = 0;
 		if (result < 0) {
@@ -1334,23 +1335,24 @@ static int __wa_seg_submit(struct wa_rpipe *rpipe, struct wa_xfer *xfer,
 			       __func__, xfer, seg->index, result);
 			goto error_iso_pack_desc_submit;
 		}
-		/*
-		 * If this segment contains more than one isoc frame, hold
-		 * onto the dto resource until we send all frames.
-		 * Only applies to non-Alereon devices.
-		 */
-		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
-			&& (seg->isoc_frame_count > 1))
-			*dto_done = 0;
 	}
 	/* submit the out data if this is an out request. */
 	if (seg->dto_urb) {
+		struct wahc *wa = xfer->wa;
 		result = usb_submit_urb(seg->dto_urb, GFP_ATOMIC);
 		if (result < 0) {
 			pr_err("%s: xfer %p#%u: DTO submit failed: %d\n",
 			       __func__, xfer, seg->index, result);
 			goto error_dto_submit;
 		}
+		/*
+		 * If this segment contains more than one isoc frame, hold
+		 * onto the dto resource until we send all frames.
+		 * Only applies to non-Alereon devices.
+		 */
+		if (((wa->quirks & WUSB_QUIRK_ALEREON_HWA_CONCAT_ISOC) == 0)
+			&& (seg->isoc_frame_count > 1))
+			*dto_done = 0;
 	}
 	seg->status = WA_SEG_SUBMITTED;
 	rpipe_avail_dec(rpipe);
@@ -2032,6 +2034,25 @@ static void wa_complete_remaining_xfer_segs(struct wa_xfer *xfer,
 	}
 }
 
+/* Populate the wa->buf_in_urb based on the current isoc transfer state. */
+static void __wa_populate_buf_in_urb_isoc(struct wahc *wa, struct wa_xfer *xfer,
+	struct wa_seg *seg, int curr_iso_frame)
+{
+	BUG_ON(wa->buf_in_urb->status == -EINPROGRESS);
+
+	/* this should always be 0 before a resubmit. */
+	wa->buf_in_urb->num_mapped_sgs	= 0;
+	wa->buf_in_urb->transfer_dma = xfer->urb->transfer_dma +
+		xfer->urb->iso_frame_desc[curr_iso_frame].offset;
+	wa->buf_in_urb->transfer_buffer_length =
+		xfer->urb->iso_frame_desc[curr_iso_frame].length;
+	wa->buf_in_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+	wa->buf_in_urb->transfer_buffer = NULL;
+	wa->buf_in_urb->sg = NULL;
+	wa->buf_in_urb->num_sgs = 0;
+	wa->buf_in_urb->context = seg;
+}
+
 /* Populate the wa->buf_in_urb based on the current transfer state. */
 static int wa_populate_buf_in_urb(struct wahc *wa, struct wa_xfer *xfer,
 	unsigned int seg_idx, unsigned int bytes_transferred)
@@ -2143,12 +2164,13 @@ static void wa_xfer_result_chew(struct wahc *wa, struct wa_xfer *xfer,
 	 */
 	if (xfer_result->bTransferSegment & 0x80)
 		wa_complete_remaining_xfer_segs(xfer, seg, WA_SEG_DONE);
-	if (usb_pipeisoc(xfer->urb->pipe)) {
+	if (usb_pipeisoc(xfer->urb->pipe)
+		&& (le32_to_cpu(xfer_result->dwNumOfPackets) > 0)) {
 		/* set up WA state to read the isoc packet status next. */
 		wa->dti_isoc_xfer_in_progress = wa_xfer_id(xfer);
 		wa->dti_isoc_xfer_seg = seg_idx;
 		wa->dti_state = WA_DTI_ISOC_PACKET_STATUS_PENDING;
-	} else if ((xfer->is_inbound)
+	} else if (xfer->is_inbound && !usb_pipeisoc(xfer->urb->pipe)
 			&& (bytes_transferred > 0)) {
 		/* IN data phase: read to buffer */
 		seg->status = WA_SEG_DTI_PENDING;
@@ -2241,7 +2263,7 @@ segment_aborted:
  *
  * inbound transfers: need to schedule a buf_in_urb read
  */
-static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
+static int wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
 {
 	struct device *dev = &wa->usb_iface->dev;
 	struct wa_xfer_packet_status_hwaiso *packet_status;
@@ -2250,8 +2272,8 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
 	unsigned long flags;
 	struct wa_seg *seg;
 	struct wa_rpipe *rpipe;
-	unsigned done = 0;
-	unsigned rpipe_ready = 0, seg_index;
+	unsigned done = 0, dti_busy = 0, data_frame_count = 0, seg_index;
+	unsigned first_frame_index = 0, rpipe_ready = 0;
 	int expected_size;
 
 	/* We have a xfer result buffer; check it */
@@ -2287,18 +2309,48 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
 			le16_to_cpu(packet_status->wLength));
 		goto error_bad_seg;
 	}
-	/* isoc packet status and lengths back xfer urb. */
+	/* write isoc packet status and lengths back to the xfer urb. */
 	status_array = packet_status->PacketStatus;
+	xfer->urb->start_frame =
+		wa->wusb->usb_hcd.driver->get_frame_number(&wa->wusb->usb_hcd);
 	for (seg_index = 0; seg_index < seg->isoc_frame_count; ++seg_index) {
-		xfer->urb->iso_frame_desc[seg->index].status =
+		struct usb_iso_packet_descriptor *iso_frame_desc =
+			xfer->urb->iso_frame_desc;
+		const int urb_frame_index =
+			seg->isoc_frame_offset + seg_index;
+
+		iso_frame_desc[urb_frame_index].status =
 			wa_xfer_status_to_errno(
 			le16_to_cpu(status_array[seg_index].PacketStatus));
-		xfer->urb->iso_frame_desc[seg->index].actual_length =
+		iso_frame_desc[urb_frame_index].actual_length =
 			le16_to_cpu(status_array[seg_index].PacketLength);
+		/* track the number of frames successfully transferred. */
+		if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+			/* save the starting frame index for buf_in_urb. */
+			if (!data_frame_count)
+				first_frame_index = seg_index;
+			++data_frame_count;
+		}
 	}
 
-	if (!xfer->is_inbound) {
-		/* OUT transfer, complete it -- */
+	if (xfer->is_inbound && data_frame_count) {
+		int result;
+
+		seg->isoc_frame_index = first_frame_index;
+		/* submit a read URB for the first frame with data. */
+		__wa_populate_buf_in_urb_isoc(wa, xfer, seg,
+			seg->isoc_frame_index + seg->isoc_frame_offset);
+
+		result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+		if (result < 0) {
+			dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+				result);
+			wa_reset_all(wa);
+		} else if (data_frame_count > 1)
+			/* If we need to read multiple frames, set DTI busy. */
+			dti_busy = 1;
+	} else {
+		/* OUT transfer or no more IN data, complete it -- */
 		seg->status = WA_SEG_DONE;
 		xfer->segs_done++;
 		rpipe_ready = rpipe_avail_inc(rpipe);
@@ -2311,13 +2363,13 @@ static void wa_process_iso_packet_status(struct wahc *wa, struct urb *urb)
 	if (rpipe_ready)
 		wa_xfer_delayed_run(rpipe);
 	wa_xfer_put(xfer);
-	return;
+	return dti_busy;
 
 error_bad_seg:
 	spin_unlock_irqrestore(&xfer->lock, flags);
 	wa_xfer_put(xfer);
 error_parse_buffer:
-	return;
+	return dti_busy;
 }
 
 /*
@@ -2337,7 +2389,7 @@ static void wa_buf_in_cb(struct urb *urb)
 	struct wahc *wa;
 	struct device *dev;
 	struct wa_rpipe *rpipe;
-	unsigned rpipe_ready;
+	unsigned rpipe_ready = 0, seg_index, isoc_data_frame_count = 0;
 	unsigned long flags;
 	u8 done = 0;
 
@@ -2345,19 +2397,61 @@ static void wa_buf_in_cb(struct urb *urb)
 	kfree(urb->sg);
 	urb->sg = NULL;
 
+	spin_lock_irqsave(&xfer->lock, flags);
+	wa = xfer->wa;
+	dev = &wa->usb_iface->dev;
+
+	if (usb_pipeisoc(xfer->urb->pipe)) {
+		/*
+		 * Find the next isoc frame with data.  Bail out after
+		 * isoc_data_frame_count > 1 since there is no need to walk
+		 * the entire frame array.  We just need to know if
+		 * isoc_data_frame_count is 0, 1, or >1.
+		 */
+		seg_index = seg->isoc_frame_index + 1;
+		while ((seg_index < seg->isoc_frame_count)
+			&& (isoc_data_frame_count <= 1)) {
+			struct usb_iso_packet_descriptor *iso_frame_desc =
+				xfer->urb->iso_frame_desc;
+			const int urb_frame_index =
+				seg->isoc_frame_offset + seg_index;
+
+			if (iso_frame_desc[urb_frame_index].actual_length > 0) {
+				/* save the index of the next frame with data */
+				if (!isoc_data_frame_count)
+					seg->isoc_frame_index = seg_index;
+				++isoc_data_frame_count;
+			}
+			++seg_index;
+		}
+	}
+	spin_unlock_irqrestore(&xfer->lock, flags);
+
 	switch (urb->status) {
 	case 0:
 		spin_lock_irqsave(&xfer->lock, flags);
-		wa = xfer->wa;
-		dev = &wa->usb_iface->dev;
-		rpipe = xfer->ep->hcpriv;
-		dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
-			xfer, seg->index, (size_t)urb->actual_length);
-		seg->status = WA_SEG_DONE;
-		seg->result = urb->actual_length;
-		xfer->segs_done++;
-		rpipe_ready = rpipe_avail_inc(rpipe);
-		done = __wa_xfer_is_done(xfer);
+
+		seg->result += urb->actual_length;
+		if (isoc_data_frame_count > 0) {
+			int result;
+			/* submit a read URB for the first frame with data. */
+			__wa_populate_buf_in_urb_isoc(wa, xfer, seg,
+				seg->isoc_frame_index + seg->isoc_frame_offset);
+			result = usb_submit_urb(wa->buf_in_urb, GFP_ATOMIC);
+			if (result < 0) {
+				dev_err(dev, "DTI Error: Could not submit buf in URB (%d)",
+					result);
+				wa_reset_all(wa);
+			}
+		} else {
+			rpipe = xfer->ep->hcpriv;
+			seg->status = WA_SEG_DONE;
+			dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n",
+				xfer, seg->index, seg->result);
+			xfer->segs_done++;
+			rpipe_ready = rpipe_avail_inc(rpipe);
+			done = __wa_xfer_is_done(xfer);
+		}
 		spin_unlock_irqrestore(&xfer->lock, flags);
 		if (done)
 			wa_xfer_completion(xfer);
@@ -2369,8 +2463,6 @@ static void wa_buf_in_cb(struct urb *urb)
 		break;
 	default:		/* Other errors ... */
 		spin_lock_irqsave(&xfer->lock, flags);
-		wa = xfer->wa;
-		dev = &wa->usb_iface->dev;
 		rpipe = xfer->ep->hcpriv;
 		if (printk_ratelimit())
 			dev_err(dev, "xfer %p#%u: data in error %d\n",
@@ -2393,6 +2485,20 @@ static void wa_buf_in_cb(struct urb *urb)
 		if (rpipe_ready)
 			wa_xfer_delayed_run(rpipe);
 	}
+	/*
+	 * If we are in this callback and isoc_data_frame_count > 0, it means
+	 * that the dti_urb submission was delayed in wa_dti_cb.  Once
+	 * isoc_data_frame_count gets to 1, we can submit the deferred URB
+	 * since the last buf_in_urb was just submitted.
+	 */
+	if (isoc_data_frame_count == 1) {
+		int result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+		if (result < 0) {
+			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+				result);
+			wa_reset_all(wa);
+		}
+	}
 }
 
 /*
@@ -2423,7 +2529,7 @@ static void wa_buf_in_cb(struct urb *urb)
  */
 static void wa_dti_cb(struct urb *urb)
 {
-	int result;
+	int result, dti_busy = 0;
 	struct wahc *wa = urb->context;
 	struct device *dev = &wa->usb_iface->dev;
 	u32 xfer_id;
@@ -2471,7 +2577,7 @@ static void wa_dti_cb(struct urb *urb)
 			wa_xfer_result_chew(wa, xfer, xfer_result);
 			wa_xfer_put(xfer);
 		} else if (wa->dti_state == WA_DTI_ISOC_PACKET_STATUS_PENDING) {
-			wa_process_iso_packet_status(wa, urb);
+			dti_busy = wa_process_iso_packet_status(wa, urb);
 		} else {
 			dev_err(dev, "DTI Error: unexpected EP state = %d\n",
 				wa->dti_state);
@@ -2494,12 +2600,15 @@ static void wa_dti_cb(struct urb *urb)
 			dev_err(dev, "DTI: URB error %d\n", urb->status);
 		break;
 	}
-	/* Resubmit the DTI URB */
-	result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
-	if (result < 0) {
-		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
-			"resetting\n", result);
-		wa_reset_all(wa);
+
+	/* Resubmit the DTI URB if we are not busy processing isoc in frames. */
+	if (!dti_busy) {
+		result = usb_submit_urb(wa->dti_urb, GFP_ATOMIC);
+		if (result < 0) {
+			dev_err(dev, "DTI Error: Could not submit DTI URB (%d)\n",
+				result);
+			wa_reset_all(wa);
+		}
 	}
 out:
 	return;
@@ -2566,8 +2675,8 @@ void wa_handle_notif_xfer(struct wahc *wa, struct wa_notif_hdr *notif_hdr)
 		NULL, 0, wa_buf_in_cb, wa);
 	result = usb_submit_urb(wa->dti_urb, GFP_KERNEL);
 	if (result < 0) {
-		dev_err(dev, "DTI Error: Could not submit DTI URB (%d), "
-			"resetting\n", result);
+		dev_err(dev, "DTI Error: Could not submit DTI URB (%d) resetting\n",
+			result);
 		goto error_dti_urb_submit;
 	}
 out:
-- 
1.7.10.4
