[RFC 2/2] xhci: refactor handle_cmd_completion() switch into separate function

This patch refactors the big switch statement in handle_cmd_completion()
into a separate function, handle_cmd_completion2() (the name is
temporary; a better one is still needed).

Some additional local variables have been declared, such as cmd_trb,
cmd_comp_code, cmd_type, add_flags and drop_flags, mainly to reduce
line length and code duplication.
Also, the variable ep_ring, which was assigned in the TRB_CONFIG_EP
case but never used, has been removed.
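
For readers skimming the diff, the following is a minimal, self-contained
sketch of the resulting control-flow split. It is plain C with made-up
names (cmd_event, handle_cmd_event, and so on), not the xHCI code itself:
the outer handler keeps the validation, the stop/abort handling and the
dequeue pointer update, while the extracted helper owns only the
per-command-type switch.

/*
 * Illustrative sketch only -- plain C with made-up names, not the xHCI
 * code itself.  It shows the shape of the split: the outer handler
 * validates the event and advances the ring, the inner helper owns the
 * per-command-type switch.
 */
#include <stdio.h>

enum cmd_type { CMD_ENABLE_SLOT, CMD_DISABLE_SLOT, CMD_NOOP };

struct cmd_event {
	enum cmd_type type;	/* which command completed */
	int comp_code;		/* completion code from the controller */
};

/* Inner helper: nothing but the switch on the command type. */
static void handle_cmd_event(int comp_code, const struct cmd_event *event)
{
	switch (event->type) {
	case CMD_ENABLE_SLOT:
		printf("enable slot completed, code %d\n", comp_code);
		break;
	case CMD_DISABLE_SLOT:
		printf("disable slot completed, code %d\n", comp_code);
		break;
	default:
		break;
	}
}

/* Outer handler: validate, extract shared fields, dispatch, advance. */
static void handle_cmd_completion(const struct cmd_event *event)
{
	if (!event)
		return;		/* stands in for the DMA sanity checks */

	handle_cmd_event(event->comp_code, event);
	/* the real code advances the dequeue pointer here via inc_deq() */
}

int main(void)
{
	struct cmd_event ev = { .type = CMD_ENABLE_SLOT, .comp_code = 1 };

	handle_cmd_completion(&ev);
	return 0;
}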

Signed-off-by: Xenia Ragiadakou <burzalodowa@xxxxxxxxx>
---
 drivers/usb/host/xhci-ring.c | 158 +++++++++++++++++++++++--------------------
 1 file changed, 83 insertions(+), 75 deletions(-)

diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ddbda35..02cd900 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1372,70 +1372,40 @@ static int handle_stopped_cmd_ring(struct xhci_hcd *xhci,
 	return cur_trb_is_good;
 }
 
-static void handle_cmd_completion(struct xhci_hcd *xhci,
-		struct xhci_event_cmd *event)
+static void handle_cmd_completion2(struct xhci_hcd *xhci, u32 cmd_comp_code,
+		union xhci_trb *cmd_trb, struct xhci_event_cmd *event)
 {
-	int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
-	u64 cmd_dma;
-	dma_addr_t cmd_dequeue_dma;
-	struct xhci_input_control_ctx *ctrl_ctx;
+	int slot_id;
+	u32 cmd_type;
 	struct xhci_virt_device *virt_dev;
+	struct xhci_input_control_ctx *ctrl_ctx;
 	unsigned int ep_index;
-	struct xhci_ring *ep_ring;
 	unsigned int ep_state;
+	u32 add_flags, drop_flags;
 
-	cmd_dma = le64_to_cpu(event->cmd_trb);
-	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
-			xhci->cmd_ring->dequeue);
-	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
-	if (cmd_dequeue_dma == 0) {
-		xhci->error_bitmask |= 1 << 4;
-		return;
-	}
-	/* Does the DMA address match our internal dequeue pointer address? */
-	if (cmd_dma != (u64) cmd_dequeue_dma) {
-		xhci->error_bitmask |= 1 << 5;
-		return;
-	}
-
-	trace_xhci_cmd_completion(&xhci->cmd_ring->dequeue->generic,
-					(struct xhci_generic_trb *) event);
+	cmd_type = TRB_FIELD_TO_TYPE(le32_to_cpu(cmd_trb->generic.field[3]));
+	slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+	virt_dev = xhci->devs[slot_id];
 
-	if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) ||
-		(GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) {
-		/* If the return value is 0, we think the trb pointed by
-		 * command ring dequeue pointer is a good trb. The good
-		 * trb means we don't want to cancel the trb, but it have
-		 * been stopped by host. So we should handle it normally.
-		 * Otherwise, driver should invoke inc_deq() and return.
-		 */
-		if (handle_stopped_cmd_ring(xhci,
-				GET_COMP_CODE(le32_to_cpu(event->status)))) {
-			inc_deq(xhci, xhci->cmd_ring);
-			return;
-		}
-	}
+	switch (cmd_type) {
 
-	switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3])
-		& TRB_TYPE_BITMASK) {
-	case TRB_TYPE(TRB_ENABLE_SLOT):
-		if (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_SUCCESS)
+	case TRB_ENABLE_SLOT:
+		if (cmd_comp_code == COMP_SUCCESS)
 			xhci->slot_id = slot_id;
 		else
 			xhci->slot_id = 0;
 		complete(&xhci->addr_dev);
 		break;
-	case TRB_TYPE(TRB_DISABLE_SLOT):
-		if (xhci->devs[slot_id]) {
+	case TRB_DISABLE_SLOT:
+		if (virt_dev) {
 			if (xhci->quirks & XHCI_EP_LIMIT_QUIRK)
 				/* Delete default control endpoint resources */
 				xhci_free_device_endpoint_resources(xhci,
-						xhci->devs[slot_id], true);
+						virt_dev, true);
 			xhci_free_virt_device(xhci, slot_id);
 		}
 		break;
-	case TRB_TYPE(TRB_CONFIG_EP):
-		virt_dev = xhci->devs[slot_id];
+	case TRB_CONFIG_EP:
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
 		/*
@@ -1446,14 +1416,15 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		 * If the command was for a halted endpoint, the xHCI driver
 		 * is not waiting on the configure endpoint command.
 		 */
-		ctrl_ctx = xhci_get_input_control_ctx(xhci,
-				virt_dev->in_ctx);
+		ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
 		if (!ctrl_ctx) {
 			xhci_warn(xhci, "Could not get input context, bad type.\n");
 			break;
 		}
+		add_flags = le32_to_cpu(ctrl_ctx->add_flags);
+		drop_flags = le32_to_cpu(ctrl_ctx->drop_flags);
 		/* Input ctx add_flags are the endpoint index plus one */
-		ep_index = xhci_last_valid_endpoint(le32_to_cpu(ctrl_ctx->add_flags)) - 1;
+		ep_index = xhci_last_valid_endpoint(add_flags) - 1;
 		/* A usb_set_interface() call directly after clearing a halted
 		 * condition may race on this quirky hardware.  Not worth
 		 * worrying about, since this is prototype hardware.  Not sure
@@ -1462,10 +1433,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 		 */
 		if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
 				ep_index != (unsigned int) -1 &&
-		    le32_to_cpu(ctrl_ctx->add_flags) - SLOT_FLAG ==
-		    le32_to_cpu(ctrl_ctx->drop_flags)) {
-			ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
-			ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
+				add_flags - SLOT_FLAG == drop_flags) {
+			ep_state = virt_dev->eps[ep_index].ep_state;
 			if (!(ep_state & EP_HALTED))
 				goto bandwidth_change;
 			xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
@@ -1473,52 +1442,48 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
 					"last ep index = %d, state = %d",
 					ep_index, ep_state);
 			/* Clear internal halted state and restart ring(s) */
-			xhci->devs[slot_id]->eps[ep_index].ep_state &=
-				~EP_HALTED;
+			virt_dev->eps[ep_index].ep_state &= ~EP_HALTED;
 			ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
 			break;
 		}
 bandwidth_change:
 		xhci_dbg_trace(xhci,  trace_xhci_dbg_context_change,
 				"Completed config ep cmd");
-		xhci->devs[slot_id]->cmd_status =
-			GET_COMP_CODE(le32_to_cpu(event->status));
-		complete(&xhci->devs[slot_id]->cmd_completion);
+		virt_dev->cmd_status = cmd_comp_code;
+		complete(&virt_dev->cmd_completion);
 		break;
-	case TRB_TYPE(TRB_EVAL_CONTEXT):
-		virt_dev = xhci->devs[slot_id];
+	case TRB_EVAL_CONTEXT:
 		if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
 			break;
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
-		complete(&xhci->devs[slot_id]->cmd_completion);
+		virt_dev->cmd_status = cmd_comp_code;
+		complete(&virt_dev->cmd_completion);
 		break;
-	case TRB_TYPE(TRB_ADDR_DEV):
-		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(le32_to_cpu(event->status));
+	case TRB_ADDR_DEV:
+		virt_dev->cmd_status = cmd_comp_code;
 		complete(&xhci->addr_dev);
 		break;
-	case TRB_TYPE(TRB_STOP_RING):
-		handle_stopped_endpoint(xhci, xhci->cmd_ring->dequeue, event);
+	case TRB_STOP_RING:
+		handle_stopped_endpoint(xhci, cmd_trb, event);
 		break;
-	case TRB_TYPE(TRB_SET_DEQ):
-		handle_set_deq_completion(xhci, event, xhci->cmd_ring->dequeue);
+	case TRB_SET_DEQ:
+		handle_set_deq_completion(xhci, event, cmd_trb);
 		break;
-	case TRB_TYPE(TRB_CMD_NOOP):
+	case TRB_CMD_NOOP:
 		break;
-	case TRB_TYPE(TRB_RESET_EP):
-		handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
+	case TRB_RESET_EP:
+		handle_reset_ep_completion(xhci, event, cmd_trb);
 		break;
-	case TRB_TYPE(TRB_RESET_DEV):
+	case TRB_RESET_DEV:
 		xhci_dbg(xhci, "Completed reset device command.\n");
 		slot_id = TRB_TO_SLOT_ID(
-			le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]));
-		virt_dev = xhci->devs[slot_id];
+			le32_to_cpu(cmd_trb->generic.field[3]));
 		if (virt_dev)
 			handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
 		else
 			xhci_warn(xhci, "Reset device command completion "
 					"for disabled slot %u\n", slot_id);
 		break;
-	case TRB_TYPE(TRB_NEC_GET_FW):
+	case TRB_NEC_GET_FW:
 		if (!(xhci->quirks & XHCI_NEC_HOST)) {
 			xhci->error_bitmask |= 1 << 6;
 			break;
@@ -1533,7 +1498,50 @@ bandwidth_change:
 		xhci->error_bitmask |= 1 << 6;
 		break;
 	}
+}
+
+static void handle_cmd_completion(struct xhci_hcd *xhci,
+		struct xhci_event_cmd *event)
+{
+	u64 cmd_dma;
+	dma_addr_t cmd_dequeue_dma;
+	union xhci_trb *cmd_trb;
+	u32 cmd_comp_code;
+
+	cmd_dma = le64_to_cpu(event->cmd_trb);
+	cmd_trb = xhci->cmd_ring->dequeue;
+	cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+						cmd_trb);
+	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
+	if (cmd_dequeue_dma == 0) {
+		xhci->error_bitmask |= 1 << 4;
+		return;
+	}
+	/* Does the DMA address match our internal dequeue pointer address? */
+	if (cmd_dma != (u64) cmd_dequeue_dma) {
+		xhci->error_bitmask |= 1 << 5;
+		return;
+	}
+
+	trace_xhci_cmd_completion(cmd_trb, (struct xhci_generic_trb *) event);
+
+	cmd_comp_code = GET_COMP_CODE(le32_to_cpu(event->status));
+	if (cmd_comp_code == COMP_CMD_ABORT || cmd_comp_code == COMP_CMD_STOP) {
+		/* If the return value is 0, we think the trb pointed by
+		 * command ring dequeue pointer is a good trb. The good
+		 * trb means we don't want to cancel the trb, but it have
+		 * been stopped by host. So we should handle it normally.
+		 * Otherwise, driver should invoke inc_deq() and return.
+		 */
+		if (handle_stopped_cmd_ring(xhci, cmd_comp_code))
+			goto update_ring;
+	}
+
+	handle_cmd_completion2(xhci, cmd_comp_code, cmd_trb, event);
+
+update_ring:
 	inc_deq(xhci, xhci->cmd_ring);
+	return;
 }
 
 static void handle_vendor_event(struct xhci_hcd *xhci,
-- 
1.8.3.2
