[RFC PATCH 2/6] HCD files for the DWC2 driver

Signed-off-by: Paul Zimmerman <paulz@xxxxxxxxxxxx>
---
 drivers/usb/dwc2/hcd.c       | 3014 ++++++++++++++++++++++++++++++++++++++++++
 drivers/usb/dwc2/hcd.h       | 1134 ++++++++++++++++
 drivers/usb/dwc2/hcd_intr.c  | 2029 ++++++++++++++++++++++++++++
 drivers/usb/dwc2/hcd_queue.c |  749 +++++++++++
 4 files changed, 6926 insertions(+), 0 deletions(-)
 create mode 100644 drivers/usb/dwc2/hcd.c
 create mode 100644 drivers/usb/dwc2/hcd.h
 create mode 100644 drivers/usb/dwc2/hcd_intr.c
 create mode 100644 drivers/usb/dwc2/hcd_queue.c

diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
new file mode 100644
index 0000000..0aebc40
--- /dev/null
+++ b/drivers/usb/dwc2/hcd.c
@@ -0,0 +1,3014 @@
+/*
+ * hcd.c - DesignWare HS OTG Controller host-mode routines
+ *
+ * Copyright (C) 2004-2012 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains the core HCD code, and implements the Linux hc_driver
+ * API
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/ch9.h>
+
+#include "core.h"
+#include "hcd.h"
+
+#ifdef DEBUG
+static void dump_channel_info(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	struct dwc2_hc *hc = qh->channel;
+	struct list_head *item;
+	struct dwc2_qh *qh_item;
+	int num_channels = hcd->core_params->host_channels;
+	u32 hcchar;
+	u32 hcsplt;
+	u32 hctsiz;
+	u32 hcdma;
+	int i;
+
+	if (hc == NULL)
+		return;
+
+	hcchar = readl(hcd->regs + HCCHAR(hc->hc_num));
+	hcsplt = readl(hcd->regs + HCSPLT(hc->hc_num));
+	hctsiz = readl(hcd->regs + HCTSIZ(hc->hc_num));
+	hcdma = readl(hcd->regs + HCDMA(hc->hc_num));
+
+	dev_dbg(hcd->dev, "  Assigned to channel %p:\n", hc);
+	dev_dbg(hcd->dev, "    hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
+	dev_dbg(hcd->dev, "    hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz, hcdma);
+	dev_dbg(hcd->dev, "    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+		hc->dev_addr, hc->ep_num, hc->ep_is_in);
+	dev_dbg(hcd->dev, "    ep_type: %d\n", hc->ep_type);
+	dev_dbg(hcd->dev, "    max_packet: %d\n", hc->max_packet);
+	dev_dbg(hcd->dev, "    data_pid_start: %d\n", hc->data_pid_start);
+	dev_dbg(hcd->dev, "    xfer_started: %d\n", hc->xfer_started);
+	dev_dbg(hcd->dev, "    halt_status: %d\n", hc->halt_status);
+	dev_dbg(hcd->dev, "    xfer_buff: %p\n", hc->xfer_buff);
+	dev_dbg(hcd->dev, "    xfer_len: %d\n", hc->xfer_len);
+	dev_dbg(hcd->dev, "    qh: %p\n", hc->qh);
+	dev_dbg(hcd->dev, "  NP inactive sched:\n");
+	list_for_each(item, &hcd->non_periodic_sched_inactive) {
+		qh_item = list_entry(item, struct dwc2_qh, qh_list_entry);
+		dev_dbg(hcd->dev, "    %p\n", qh_item);
+	}
+	dev_dbg(hcd->dev, "  NP active sched:\n");
+	list_for_each(item, &hcd->non_periodic_sched_active) {
+		qh_item = list_entry(item, struct dwc2_qh, qh_list_entry);
+		dev_dbg(hcd->dev, "    %p\n", qh_item);
+	}
+	dev_dbg(hcd->dev, "  Channels:\n");
+	for (i = 0; i < num_channels; i++) {
+		struct dwc2_hc *chan = hcd->hc_ptr_array[i];
+
+		dev_dbg(hcd->dev, "    %2d: %p\n", i, chan);
+	}
+}
+#endif /* DEBUG */
+
+/* Returns index of host channel to perform LPM transaction */
+static int hcd_get_hc_for_lpm_tran(struct dwc2_hcd *hcd, u8 devaddr)
+{
+	struct dwc2_hc *hc;
+	u32 hcchar;
+	u32 gintmsk;
+
+	if (list_empty(&hcd->free_hc_list)) {
+		dev_dbg(hcd->dev,
+			"No free channel to select for LPM transaction\n");
+		return -1;
+	}
+
+	hc = list_first_entry(&hcd->free_hc_list, struct dwc2_hc,
+			      hc_list_entry);
+
+	/* Mask host channel interrupts */
+	gintmsk = readl(hcd->regs + GINTMSK);
+	gintmsk &= ~GINTSTS_HChInt;
+	writel(gintmsk, hcd->regs + GINTMSK);
+
+	/* Fill fields that core needs for LPM transaction */
+	hcchar = (devaddr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK) |
+		 DWC2_EP_TYPE_CONTROL << HCCHAR_EPTYPE_SHIFT |
+		 64 << HCCHAR_MPS_SHIFT;
+	if (hc->speed == DWC2_EP_SPEED_LOW)
+		hcchar |= HCCHAR_LSPDDEV;
+	writel(hcchar, hcd->regs + HCCHAR(hc->hc_num));
+
+	/* Remove the host channel from the free list */
+	list_del_init(&hc->hc_list_entry);
+
+	dev_dbg(hcd->dev, "hcnum = %d devaddr = %d\n", hc->hc_num, devaddr);
+
+	return hc->hc_num;
+}
+
+/* Release hc after performing LPM transaction */
+static void hcd_free_hc_from_lpm(struct dwc2_hcd *hcd)
+{
+	struct dwc2_hc *hc;
+	u32 lpmcfg;
+	u8 hc_num;
+
+	lpmcfg = readl(hcd->regs + GLPMCFG);
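+	/*
+	 * Extract the channel index of the completed LPM transaction. Fields
+	 * are extracted here, and throughout this file, with the pattern
+	 * "reg >> SHIFT & MASK >> SHIFT"; since >> binds more tightly than
+	 * &, the result is the right-justified field value.
+	 */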
+	hc_num = lpmcfg >> GLPMCFG_LPM_CHAN_INDEX_SHIFT &
+		 GLPMCFG_LPM_CHAN_INDEX_MASK >> GLPMCFG_LPM_CHAN_INDEX_SHIFT;
+
+	hc = hcd->hc_ptr_array[hc_num];
+
+	dev_dbg(hcd->dev, "Freeing channel %d after LPM\n", hc_num);
+	/* Return host channel to free list */
+	list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);
+}
+
+static int hcd_send_lpm(struct dwc2_hcd *hcd, u8 devaddr, u8 hird,
+			u8 bremotewake)
+{
+	u32 pcgctl;
+	u32 lpmcfg;
+	int channel;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	channel = hcd_get_hc_for_lpm_tran(hcd, devaddr);
+	if (channel < 0)
+		return channel;
+
+	pcgctl = readl(hcd->regs + PCGCTL);
+	pcgctl |= PCGCTL_ENBL_SLEEP_GATING;
+	writel(pcgctl, hcd->regs + PCGCTL);
+
+	/* Read LPM config register */
+	lpmcfg = readl(hcd->regs + GLPMCFG);
+
+	/* Program LPM transaction fields */
+	lpmcfg &= ~(GLPMCFG_REM_WKUP_EN | GLPMCFG_HIRD_MASK |
+		    GLPMCFG_HIRD_THRES_MASK | GLPMCFG_LPM_CHAN_INDEX_MASK);
+	lpmcfg |= hird << GLPMCFG_HIRD_SHIFT |
+		  0x1c << GLPMCFG_HIRD_THRES_SHIFT |
+		  channel << GLPMCFG_LPM_CHAN_INDEX_SHIFT |
+		  GLPMCFG_EN_UTMI_SLEEP;
+	if (bremotewake)
+		lpmcfg |= GLPMCFG_REM_WKUP_EN;
+
+	/* Program LPM config register */
+	writel(lpmcfg, hcd->regs + GLPMCFG);
+
+	/* Send LPM transaction */
+	lpmcfg |= GLPMCFG_SEND_LPM;
+	writel(lpmcfg, hcd->regs + GLPMCFG);
+
+	return 0;
+}
+
+/*
+ * Processes all the URBs in a single list of QHs. Completes them with
+ * -ETIMEDOUT and frees the QTD.
+ */
+static void kill_urbs_in_qh_list(struct dwc2_hcd *hcd,
+				 struct list_head *qh_list)
+{
+	struct list_head *qh_item;
+	struct list_head *qtd_item, *qtd_tmp;
+	struct dwc2_qh *qh;
+	struct dwc2_qtd *qtd;
+
+	list_for_each(qh_item, qh_list) {
+		qh = list_entry(qh_item, struct dwc2_qh, qh_list_entry);
+		list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
+			qtd = list_entry(qtd_item, struct dwc2_qtd,
+					 qtd_list_entry);
+			if (qtd->urb != NULL) {
+				dwc2_host_complete(hcd, qtd->urb->priv,
+						   qtd->urb, -ETIMEDOUT);
+				dwc2_hcd_qtd_remove_and_free(hcd, qtd, qh);
+			}
+		}
+	}
+}
+
+static void qh_list_free(struct dwc2_hcd *hcd, struct list_head *qh_list)
+{
+	struct list_head *item, *tmp;
+	struct dwc2_qh *qh;
+	unsigned long flags;
+
+	if (!qh_list->next)
+		/* The list hasn't been initialized yet */
+		return;
+
+	/*
+	 * Hold the spinlock here. This is not needed if this function is
+	 * being called from an ISR.
+	 */
+	spin_lock_irqsave(&hcd->lock, flags);
+
+	/* Ensure there are no QTDs or URBs left */
+	kill_urbs_in_qh_list(hcd, qh_list);
+	spin_unlock_irqrestore(&hcd->lock, flags);
+
+	list_for_each_safe(item, tmp, qh_list) {
+		qh = list_entry(item, struct dwc2_qh, qh_list_entry);
+		dwc2_hcd_qh_remove_and_free(hcd, qh);
+	}
+}
+
+/*
+ * Responds with an error status of -ETIMEDOUT to all URBs in the non-periodic
+ * and periodic schedules. The QTD associated with each URB is removed from
+ * the schedule and freed. This function may be called when a disconnect is
+ * detected or when the HCD is being stopped.
+ */
+static void kill_all_urbs(struct dwc2_hcd *hcd)
+{
+	kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_inactive);
+	kill_urbs_in_qh_list(hcd, &hcd->non_periodic_sched_active);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_inactive);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_ready);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_assigned);
+	kill_urbs_in_qh_list(hcd, &hcd->periodic_sched_queued);
+}
+
+/*
+ * Start the connection timer. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds. The
+ * timer is deleted if a port connect interrupt occurs before the
+ * timer expires.
+ */
+static void start_connect_timer(struct dwc2_hcd *hcd)
+{
+	dev_dbg(hcd->dev, "Modifying conn_timer to expire in +%d msec\n",
+		10000);
+	mod_timer(&hcd->conn_timer, jiffies + msecs_to_jiffies(10000));
+}
+
+/*
+ * Connection timeout function. An OTG host is required to display a
+ * message if the device does not connect within 10 seconds.
+ */
+static void hcd_connect_timeout(unsigned long data)
+{
+	struct dwc2_hcd *hcd = (struct dwc2_hcd *)data;
+
+	dev_dbg(hcd->dev, "Connect Timeout\n");
+	dev_dbg(hcd->dev, "Device Not Connected/Responding\n");
+}
+
+static void del_timers(struct dwc2_hcd *hcd)
+{
+	del_timer(&hcd->conn_timer);
+	setup_timer(&hcd->conn_timer, hcd_connect_timeout, (unsigned long)hcd);
+}
+
+/**
+ * dwc2_hcd_start() - Starts the HCD when switching to Host mode
+ *
+ * @hcd: Pointer to struct dwc2_hcd
+ */
+void dwc2_hcd_start(struct dwc2_hcd *hcd)
+{
+	u32 hprt0;
+
+	if (hcd->op_state == B_HOST) {
+		/*
+		 * Reset the port. During a HNP mode switch the reset
+		 * needs to occur within 1ms and have a duration of at
+		 * least 50ms.
+		 */
+		hprt0 = dwc2_read_hprt0(hcd);
+		hprt0 |= HPRT0_RST;
+		writel(hprt0, hcd->regs + HPRT0);
+	}
+
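+	/*
+	 * Start the HCD after a 50 ms delay so that, in the HNP case above,
+	 * the port reset signaling lasts at least the required 50 ms
+	 */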
+	queue_delayed_work(hcd->wq_otg, &hcd->start_work, msecs_to_jiffies(50));
+}
+
+/**
+ * dwc2_hcd_disconnect() - Handles disconnect of the HCD
+ *
+ * @hcd: Pointer to struct dwc2_hcd
+ */
+void dwc2_hcd_disconnect(struct dwc2_hcd *hcd)
+{
+	u32 intr;
+
+	/* Set status flags for the hub driver */
+	hcd->flags.b.port_connect_status_change = 1;
+	hcd->flags.b.port_connect_status = 0;
+
+	/*
+	 * Shutdown any transfers in process by clearing the Tx FIFO Empty
+	 * interrupt mask and status bits and disabling subsequent host
+	 * channel interrupts.
+	 */
+	intr = readl(hcd->regs + GINTMSK);
+	intr &= ~(GINTSTS_NPTxFEmp | GINTSTS_PTxFEmp | GINTSTS_HChInt);
+	writel(intr, hcd->regs + GINTMSK);
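+	/* The status bits in GINTSTS are cleared by writing 1 to them */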
+	intr = readl(hcd->regs + GINTSTS);
+	intr |= GINTSTS_NPTxFEmp | GINTSTS_PTxFEmp | GINTSTS_HChInt;
+	writel(intr, hcd->regs + GINTSTS);
+
+	del_timers(hcd);
+
+	/*
+	 * Turn off the vbus power only if the core has transitioned to device
+	 * mode. If still in host mode, need to keep power on to detect a
+	 * reconnection.
+	 */
+	if (dwc2_is_device_mode(hcd)) {
+		if (hcd->op_state != A_SUSPEND) {
+			dev_dbg(hcd->dev, "Disconnect: PortPower off\n");
+			writel(0, hcd->regs + HPRT0);
+		}
+
+		dwc2_disable_host_interrupts(hcd);
+	}
+
+	/* Respond with an error status to all URBs in the schedule */
+	kill_all_urbs(hcd);
+
+	if (dwc2_is_host_mode(hcd)) {
+		/* Clean up any host channels that were in use */
+		int num_channels;
+		int i;
+		struct dwc2_hc *channel;
+		u32 hcchar;
+
+		num_channels = hcd->core_params->host_channels;
+
+		if (!hcd->dma_enable) {
+			/* Flush out any channel requests in slave mode */
+			for (i = 0; i < num_channels; i++) {
+				channel = hcd->hc_ptr_array[i];
+				if (list_empty(&channel->hc_list_entry)) {
+					hcchar = readl(hcd->regs + HCCHAR(i));
+					if (hcchar & HCCHAR_CHENA) {
+						hcchar &= ~(HCCHAR_CHENA |
+							    HCCHAR_EPDIR);
+						hcchar |= HCCHAR_CHDIS;
+						writel(hcchar, hcd->regs +
+						       HCCHAR(i));
+					}
+				}
+			}
+		}
+
+		for (i = 0; i < num_channels; i++) {
+			channel = hcd->hc_ptr_array[i];
+			if (list_empty(&channel->hc_list_entry)) {
+				hcchar = readl(hcd->regs + HCCHAR(i));
+				if (hcchar & HCCHAR_CHENA) {
+					/* Halt the channel */
+					hcchar |= HCCHAR_CHDIS;
+					writel(hcchar, hcd->regs + HCCHAR(i));
+				}
+
+				dwc2_hc_cleanup(hcd, channel);
+				list_add_tail(&channel->hc_list_entry,
+					      &hcd->free_hc_list);
+				/*
+				 * Added for Descriptor DMA to prevent channel
+				 * double cleanup in release_channel_ddma(),
+				 * which is called from ep_disable when device
+				 * disconnects
+				 */
+				channel->qh = NULL;
+			}
+		}
+	}
+
+	dwc2_host_disconnect(hcd);
+}
+
+/**
+ * dwc2_hcd_session_start() - Handles session start request
+ *
+ * @hcd: Pointer to struct dwc2_hcd
+ */
+void dwc2_hcd_session_start(struct dwc2_hcd *hcd)
+{
+	dev_dbg(hcd->dev, "%s(%p)\n", __func__, hcd);
+	start_connect_timer(hcd);
+}
+
+/**
+ * dwc2_hcd_rem_wakeup() - Handles Remote Wakeup
+ *
+ * @hcd: Pointer to struct dwc2_hcd
+ */
+void dwc2_hcd_rem_wakeup(struct dwc2_hcd *hcd)
+{
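+	/*
+	 * Flag the appropriate status change for the hub driver: a wakeup
+	 * from L2 completes a port suspend, otherwise the port is waking
+	 * from the L1 sleep state
+	 */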
+	if (hcd->lx_state == DWC2_L2)
+		hcd->flags.b.port_suspend_change = 1;
+	else
+		hcd->flags.b.port_l1_change = 1;
+}
+
+/**
+ * dwc2_hcd_sleep() - Handles sleep of HCD
+ *
+ * @hcd: Pointer to struct dwc2_hcd
+ */
+void dwc2_hcd_sleep(struct dwc2_hcd *hcd)
+{
+	hcd_free_hc_from_lpm(hcd);
+}
+
+/**
+ * dwc2_hcd_stop() - Halts the DWC_otg host mode operations in a clean manner
+ *
+ * @hcd: Pointer to struct dwc2_hcd
+ */
+void dwc2_hcd_stop(struct dwc2_hcd *hcd)
+{
+	dev_dbg(hcd->dev, "DWC OTG HCD STOP\n");
+
+	/*
+	 * The root hub should be disconnected before this function is called.
+	 * The disconnect will clear the QTD lists (via ..._hcd_urb_dequeue)
+	 * and the QH lists (via ..._hcd_endpoint_disable).
+	 */
+
+	/* Turn off all host-specific interrupts */
+	dwc2_disable_host_interrupts(hcd);
+
+	/* Turn off the vbus power */
+	dev_dbg(hcd->dev, "PortPower off\n");
+	writel(0, hcd->regs + HPRT0);
+	mdelay(1);
+}
+
+int dwc2_hcd_urb_enqueue(struct dwc2_hcd *hcd, struct dwc2_hcd_urb *urb,
+			 void **ep_handle, gfp_t mem_flags)
+{
+	struct dwc2_qtd *qtd;
+	unsigned long flags;
+	u32 intr_mask;
+	int retval;
+
+	if (!hcd->flags.b.port_connect_status) {
+		/* No longer connected */
+		dev_err(hcd->dev, "Not connected\n");
+		return -ENODEV;
+	}
+
+	qtd = kzalloc(sizeof(*qtd), mem_flags);
+	if (!qtd) {
+		dev_err(hcd->dev,
+			"DWC OTG HCD URB Enqueue failed creating QTD\n");
+		return -ENOMEM;
+	}
+
+	dwc2_hcd_qtd_init(qtd, urb);
+	retval = dwc2_hcd_qtd_add(qtd, hcd, (struct dwc2_qh **)ep_handle,
+				  mem_flags);
+	if (retval < 0) {
+		dev_err(hcd->dev,
+			"DWC OTG HCD URB Enqueue failed adding QTD. Error status %d\n",
+			retval);
+		kfree(qtd);
+		return retval;
+	}
+
+	qtd->qh = *ep_handle;
+
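+	/*
+	 * If the SOF interrupt is masked, the transfer schedule is not being
+	 * processed from the SOF handler, so kick off transaction processing
+	 * here instead
+	 */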
+	intr_mask = readl(hcd->regs + GINTMSK);
+	if (!(intr_mask & GINTSTS_SOF) && retval == 0) {
+		enum dwc2_transaction_type tr_type;
+
+		if (qtd->qh->ep_type == USB_ENDPOINT_XFER_BULK &&
+		    !(qtd->urb->flags & URB_GIVEBACK_ASAP))
+			/*
+			 * Do not schedule SG transactions until qtd has
+			 * URB_GIVEBACK_ASAP set
+			 */
+			return 0;
+		spin_lock_irqsave(&hcd->lock, flags);
+		tr_type = dwc2_hcd_select_transactions(hcd);
+		if (tr_type != DWC2_TRANSACTION_NONE)
+			dwc2_hcd_queue_transactions(hcd, tr_type);
+		spin_unlock_irqrestore(&hcd->lock, flags);
+	}
+
+	return retval;
+}
+
+int dwc2_hcd_urb_dequeue(struct dwc2_hcd *hcd, struct dwc2_hcd_urb *urb)
+{
+	struct dwc2_qh *qh;
+	struct dwc2_qtd *urb_qtd;
+
+	urb_qtd = urb->qtd;
+	if (!urb_qtd) {
+		dev_dbg(hcd->dev, "## Urb QTD is NULL ##\n");
+		return 1;
+	}
+	qh = urb_qtd->qh;
+	if (!qh) {
+		dev_dbg(hcd->dev, "## Urb QTD QH is NULL ##\n");
+		return 1;
+	}
+
+#ifdef DEBUG
+	if (urb_qtd->in_process)
+		dump_channel_info(hcd, qh);
+#endif
+
+	if (urb_qtd->in_process && qh->channel) {
+		/* The QTD is in process (it has been assigned to a channel) */
+		if (hcd->flags.b.port_connect_status)
+			/*
+			 * If still connected (i.e. in host mode), halt the
+			 * channel so it can be used for other transfers. If
+			 * no longer connected, the host registers can't be
+			 * written to halt the channel since the core is in
+			 * device mode.
+			 */
+			dwc2_hc_halt(hcd, qh->channel,
+				     DWC2_HC_XFER_URB_DEQUEUE);
+	}
+
+	/*
+	 * Free the QTD and clean up the associated QH. Leave the QH in the
+	 * schedule if it has any remaining QTDs.
+	 */
+	if (!hcd->dma_desc_enable) {
+		u8 b = urb_qtd->in_process;
+
+		dwc2_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
+		if (b) {
+			dwc2_hcd_qh_deactivate(hcd, qh, 0);
+			qh->channel = NULL;
+		} else if (list_empty(&qh->qtd_list)) {
+			dwc2_hcd_qh_remove(hcd, qh);
+		}
+	} else {
+		dwc2_hcd_qtd_remove_and_free(hcd, urb_qtd, qh);
+	}
+
+	return 0;
+}
+
+int dwc2_hcd_endpoint_disable(struct dwc2_hcd *hcd,
+			      struct usb_host_endpoint *ep, int retry)
+{
+	struct dwc2_qh *qh;
+	unsigned long flags;
+
+	dev_dbg(hcd->dev, "%s(%p,%p)\n", __func__, hcd, ep);
+
+	spin_lock_irqsave(&hcd->lock, flags);
+
+	qh = ep->hcpriv;
+	if (!qh) {
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		return -EINVAL;
+	}
+
+	while (!list_empty(&qh->qtd_list) && retry--) {
+		if (retry == 0) {
+			ep->hcpriv = NULL;
+			spin_unlock_irqrestore(&hcd->lock, flags);
+			dev_err(hcd->dev,
+				"## timeout in dwc2_hcd_endpoint_disable() ##\n");
+			return -EBUSY;
+		}
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		msleep(20);
+		spin_lock_irqsave(&hcd->lock, flags);
+	}
+
+	dwc2_hcd_qh_remove(hcd, qh);
+	ep->hcpriv = NULL;
+	spin_unlock_irqrestore(&hcd->lock, flags);
+
+	/*
+	 * Split dwc2_hcd_qh_remove_and_free() into qh_remove and qh_free to
+	 * prevent a stack dump from freeing DMA memory with IRQs disabled
+	 * (under spin_lock_irqsave) in dwc2_hcd_desc_list_free() and
+	 * dwc2_hcd_frame_list_alloc()
+	 */
+	dwc2_hcd_qh_free(hcd, qh);
+
+	return 0;
+}
+
+int dwc2_hcd_endpoint_reset(struct dwc2_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct dwc2_qh *qh = ep->hcpriv;
+
+	if (!qh)
+		return -EINVAL;
+
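+	/* The first transfer on an endpoint after a reset must use DATA0 */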
+	qh->data_toggle = DWC2_HC_PID_DATA0;
+
+	return 0;
+}
+
+/*
+ * Initializes dynamic portions of the DWC_otg HCD state
+ */
+static void hcd_reinit(struct dwc2_hcd *hcd)
+{
+	struct list_head *channel_item, *channel_tmp;
+	struct dwc2_hc *channel;
+	int num_channels;
+	int i;
+
+	hcd->flags.d32 = 0;
+
+	hcd->non_periodic_qh_ptr = &hcd->non_periodic_sched_active;
+	hcd->non_periodic_channels = 0;
+	hcd->periodic_channels = 0;
+
+	/*
+	 * Put all channels in the free channel list and clean up channel
+	 * states
+	 */
+	list_for_each_safe(channel_item, channel_tmp, &hcd->free_hc_list) {
+		channel = list_entry(channel_item, struct dwc2_hc,
+				     hc_list_entry);
+		list_del_init(&channel->hc_list_entry);
+	}
+
+	num_channels = hcd->core_params->host_channels;
+	for (i = 0; i < num_channels; i++) {
+		channel = hcd->hc_ptr_array[i];
+		list_add_tail(&channel->hc_list_entry, &hcd->free_hc_list);
+		dwc2_hc_cleanup(hcd, channel);
+	}
+
+	/* Initialize the DWC core for host mode operation */
+	dwc2_core_host_init(hcd);
+}
+
+/**
+ * assign_and_init_hc() - Assigns transactions from a QTD to a free host channel
+ * and initializes the host channel to perform the transactions. The host
+ * channel is removed from the free list.
+ *
+ * @hcd: The HCD state structure
+ * @qh:  Transactions from the first QTD for this QH are selected and assigned
+ *       to a free host channel
+ */
+static void assign_and_init_hc(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	struct dwc2_hcd_iso_packet_desc *frame_desc;
+	struct dwc2_hcd_urb *urb;
+	struct dwc2_qtd *qtd;
+	struct dwc2_hc *hc;
+	void *ptr = NULL;
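+	/*
+	 * ptr remains NULL unless the transfer buffer turns out to be
+	 * non-dword-aligned in buffer DMA mode, in which case it holds the
+	 * CPU address of the buffer so the data can be bounced through a
+	 * dword-aligned DMA buffer below
+	 */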
+
+	dev_dbg(hcd->dev, "%s(%p,%p)\n", __func__, hcd, qh);
+
+	hc = list_first_entry(&hcd->free_hc_list, struct dwc2_hc,
+			      hc_list_entry);
+
+	/* Remove the host channel from the free list */
+	list_del_init(&hc->hc_list_entry);
+
+	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
+	urb = qtd->urb;
+	qh->channel = hc;
+	qtd->in_process = 1;
+
+	/*
+	 * Use the pipe information to determine the device address. The
+	 * address is 0 before the SET_ADDRESS command and the correct
+	 * address afterward.
+	 */
+	hc->dev_addr = dwc2_hcd_get_dev_addr(&urb->pipe_info);
+	hc->ep_num = dwc2_hcd_get_ep_num(&urb->pipe_info);
+	hc->speed = qh->dev_speed;
+	hc->max_packet = dwc2_max_packet(qh->maxp);
+
+	hc->xfer_started = 0;
+	hc->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
+	hc->error_state = (qtd->error_count > 0);
+	hc->halt_on_queue = 0;
+	hc->halt_pending = 0;
+	hc->requests = 0;
+
+	/*
+	 * The following values may be modified in the transfer type section
+	 * below. The xfer_len value may be reduced when the transfer is
+	 * started to accommodate the max widths of the XferSize and PktCnt
+	 * fields in the HCTSIZn register.
+	 */
+
+	hc->ep_is_in = (dwc2_hcd_is_pipe_in(&urb->pipe_info) != 0);
+	if (hc->ep_is_in)
+		hc->do_ping = 0;
+	else
+		hc->do_ping = qh->ping_state;
+
+	hc->data_pid_start = qh->data_toggle;
+	hc->multi_count = 1;
+
+	if (hcd->dma_enable) {
+		hc->xfer_buff = (u8 *)urb->dma + urb->actual_length;
+
+		/* For non-dword aligned case */
+		if (((unsigned long)hc->xfer_buff & 0x3) &&
+		    !hcd->dma_desc_enable)
+			ptr = (u8 *)urb->buf + urb->actual_length;
+	} else {
+		hc->xfer_buff = (u8 *)urb->buf + urb->actual_length;
+	}
+	hc->xfer_len = urb->length - urb->actual_length;
+	hc->xfer_count = 0;
+
+	/* Set the split attributes */
+	hc->do_split = 0;
+	if (qh->do_split) {
+		u32 hub_addr, port_addr;
+
+		hc->do_split = 1;
+		hc->xact_pos = qtd->isoc_split_pos;
+		hc->complete_split = qtd->complete_split;
+		dwc2_host_hub_info(hcd, urb->priv, &hub_addr, &port_addr);
+		hc->hub_addr = (u8)hub_addr;
+		hc->port_addr = (u8)port_addr;
+	}
+
+	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		hc->ep_type = DWC2_EP_TYPE_CONTROL;
+		switch (qtd->control_phase) {
+		case DWC2_CONTROL_SETUP:
+			dev_dbg(hcd->dev, "  Control setup transaction\n");
+			hc->do_ping = 0;
+			hc->ep_is_in = 0;
+			hc->data_pid_start = DWC2_HC_PID_SETUP;
+			if (hcd->dma_enable)
+				hc->xfer_buff = (u8 *)urb->setup_dma;
+			else
+				hc->xfer_buff = (u8 *)urb->setup_packet;
+			hc->xfer_len = 8;
+			ptr = NULL;
+			break;
+		case DWC2_CONTROL_DATA:
+			dev_dbg(hcd->dev, "  Control data transaction\n");
+			hc->data_pid_start = qtd->data_toggle;
+			break;
+		case DWC2_CONTROL_STATUS:
+			/*
+			 * Direction is opposite of data direction or IN if no
+			 * data
+			 */
+			dev_dbg(hcd->dev, "  Control status transaction\n");
+			if (urb->length == 0)
+				hc->ep_is_in = 1;
+			else
+				hc->ep_is_in =
+					dwc2_hcd_is_pipe_out(&urb->pipe_info);
+			if (hc->ep_is_in)
+				hc->do_ping = 0;
+			hc->data_pid_start = DWC2_HC_PID_DATA1;
+			hc->xfer_len = 0;
+			if (hcd->dma_enable)
+				hc->xfer_buff = (u8 *)hcd->status_buf_dma;
+			else
+				hc->xfer_buff = (u8 *)hcd->status_buf;
+			ptr = NULL;
+			break;
+		}
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		hc->ep_type = DWC2_EP_TYPE_BULK;
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		hc->ep_type = DWC2_EP_TYPE_INTR;
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		hc->ep_type = DWC2_EP_TYPE_ISOC;
+
+		if (hcd->dma_desc_enable)
+			break;
+
+		frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+		frame_desc->status = 0;
+
+		if (hcd->dma_enable)
+			hc->xfer_buff = (u8 *)urb->dma;
+		else
+			hc->xfer_buff = (u8 *)urb->buf;
+		hc->xfer_buff += frame_desc->offset + qtd->isoc_split_offset;
+		hc->xfer_len = frame_desc->length - qtd->isoc_split_offset;
+
+		/* For non-dword aligned buffers */
+		if (((unsigned long)hc->xfer_buff & 0x3) && hcd->dma_enable)
+			ptr = (u8 *)urb->buf + frame_desc->offset +
+					qtd->isoc_split_offset;
+		else
+			ptr = NULL;
+
+		if (hc->xact_pos == DWC_HCSPLT_XACTPOS_ALL) {
+			if (hc->xfer_len <= 188)
+				hc->xact_pos = DWC_HCSPLT_XACTPOS_ALL;
+			else
+				hc->xact_pos = DWC_HCSPLT_XACTPOS_BEGIN;
+		}
+		break;
+	}
+
+	/* Non DWORD-aligned buffer case */
+	if (ptr) {
+		u32 buf_size;
+
+		dev_dbg(hcd->dev, "Non-aligned buffer\n");
+		if (hc->ep_type != DWC2_EP_TYPE_ISOC)
+			buf_size = hcd->core_params->max_transfer_size;
+		else
+			buf_size = 4096;
+		if (!qh->dw_align_buf) {
+			qh->dw_align_buf = dma_alloc_coherent(hcd->dev,
+						buf_size, &qh->dw_align_buf_dma,
+						GFP_ATOMIC);
+			if (!qh->dw_align_buf) {
+				dev_err(hcd->dev,
+					"%s: Failed to allocate memory to handle non-dword aligned buffer\n",
+					__func__);
+				return;
+			}
+		}
+		if (!hc->ep_is_in)
+			memcpy(qh->dw_align_buf, ptr, hc->xfer_len);
+		hc->align_buff = qh->dw_align_buf_dma;
+	} else {
+		hc->align_buff = 0;
+	}
+
+	if (hc->ep_type == DWC2_EP_TYPE_INTR ||
+	    hc->ep_type == DWC2_EP_TYPE_ISOC) {
+		/*
+		 * This value may be modified when the transfer is started to
+		 * reflect the actual transfer length
+		 */
+		hc->multi_count = dwc2_hb_mult(qh->maxp);
+	}
+
+	if (hcd->dma_desc_enable)
+		hc->desc_list_addr = qh->desc_list_dma;
+
+	dwc2_hc_init(hcd, hc);
+	hc->qh = qh;
+}
+
+/**
+ * dwc2_hcd_select_transactions() - Selects transactions from the HCD transfer
+ * schedule and assigns them to available host channels. Called from the HCD
+ * interrupt handler functions.
+ *
+ * @hcd: The HCD state structure
+ *
+ * Return: The types of new transactions that were assigned to host channels
+ */
+enum dwc2_transaction_type dwc2_hcd_select_transactions(struct dwc2_hcd *hcd)
+{
+	enum dwc2_transaction_type ret_val = DWC2_TRANSACTION_NONE;
+	struct list_head *qh_ptr;
+	struct list_head *qh_tmp;
+	struct dwc2_qh *qh;
+	int num_channels;
+
+#ifdef DEBUG_SOF
+	dev_dbg(hcd->dev, "  Select Transactions\n");
+#endif
+
+	/* Process entries in the periodic ready list */
+	list_for_each_safe(qh_ptr, qh_tmp, &hcd->periodic_sched_ready) {
+		if (list_empty(&hcd->free_hc_list))
+			break;
+		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+		assign_and_init_hc(hcd, qh);
+
+		/*
+		 * Move the QH from the periodic ready schedule to the
+		 * periodic assigned schedule
+		 */
+		list_move(&qh->qh_list_entry, &hcd->periodic_sched_assigned);
+		ret_val = DWC2_TRANSACTION_PERIODIC;
+	}
+
+	/*
+	 * Process entries in the inactive portion of the non-periodic
+	 * schedule. Some free host channels may not be used if they are
+	 * reserved for periodic transfers.
+	 */
+	num_channels = hcd->core_params->host_channels;
+	list_for_each_safe(qh_ptr, qh_tmp, &hcd->non_periodic_sched_inactive) {
+		if (hcd->non_periodic_channels >= num_channels -
+							hcd->periodic_channels)
+			break;
+		if (list_empty(&hcd->free_hc_list))
+			break;
+		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+		assign_and_init_hc(hcd, qh);
+
+		/*
+		 * Move the QH from the non-periodic inactive schedule to the
+		 * non-periodic active schedule
+		 */
+		list_move(&qh->qh_list_entry, &hcd->non_periodic_sched_active);
+
+		if (ret_val == DWC2_TRANSACTION_NONE)
+			ret_val = DWC2_TRANSACTION_NON_PERIODIC;
+		else
+			ret_val = DWC2_TRANSACTION_ALL;
+
+		hcd->non_periodic_channels++;
+	}
+
+	return ret_val;
+}
+
+/**
+ * queue_transaction() - Attempts to queue a single transaction request for
+ * a host channel associated with either a periodic or non-periodic transfer
+ *
+ * @hcd: The HCD state structure
+ * @hc:  Host channel descriptor associated with either a periodic or
+ *       non-periodic transfer
+ * @fifo_dwords_avail: Number of DWORDs available in the periodic Tx FIFO
+ *                     for periodic transfers or the non-periodic Tx FIFO
+ *                     for non-periodic transfers
+ *
+ * Return: 1 if a request is queued and more requests may be needed to
+ * complete the transfer, 0 if no more requests are required for this
+ * transfer, -1 if there is insufficient space in the Tx FIFO
+ *
+ * This function assumes that there is space available in the appropriate
+ * request queue. For an OUT transfer or SETUP transaction in Slave mode,
+ * it checks whether space is available in the appropriate Tx FIFO.
+ */
+static int queue_transaction(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+			     u16 fifo_dwords_avail)
+{
+	int retval = 0;
+
+	if (hcd->dma_enable) {
+		if (hcd->dma_desc_enable) {
+			if (!hc->xfer_started ||
+			    hc->ep_type == DWC2_EP_TYPE_ISOC) {
+				dwc2_hcd_start_xfer_ddma(hcd, hc->qh);
+				hc->qh->ping_state = 0;
+			}
+		} else if (!hc->xfer_started) {
+			dwc2_hc_start_transfer(hcd, hc);
+			hc->qh->ping_state = 0;
+		}
+	} else if (hc->halt_pending) {
+		/* Don't queue a request if the channel has been halted */
+	} else if (hc->halt_on_queue) {
+		dwc2_hc_halt(hcd, hc, hc->halt_status);
+	} else if (hc->do_ping) {
+		if (!hc->xfer_started)
+			dwc2_hc_start_transfer(hcd, hc);
+	} else if (!hc->ep_is_in || hc->data_pid_start == DWC2_HC_PID_SETUP) {
+		if ((fifo_dwords_avail * 4) >= hc->max_packet) {
+			if (!hc->xfer_started) {
+				dwc2_hc_start_transfer(hcd, hc);
+				retval = 1;
+			} else {
+				retval = dwc2_hc_continue_transfer(hcd, hc);
+			}
+		} else {
+			retval = -1;
+		}
+	} else {
+		if (!hc->xfer_started) {
+			dwc2_hc_start_transfer(hcd, hc);
+			retval = 1;
+		} else {
+			retval = dwc2_hc_continue_transfer(hcd, hc);
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Processes periodic channels for the next frame and queues transactions for
+ * these channels to the DWC_otg controller. After queueing transactions, the
+ * Periodic Tx FIFO Empty interrupt is enabled if there are more transactions
+ * to queue as Periodic Tx FIFO or request queue space becomes available.
+ * Otherwise, the Periodic Tx FIFO Empty interrupt is disabled.
+ */
+static void process_periodic_channels(struct dwc2_hcd *hcd)
+{
+	struct list_head *qh_ptr;
+	struct dwc2_qh *qh;
+	u32 tx_status;
+	u32 fspcavail;
+	u32 gintmsk;
+	int status;
+	int no_queue_space = 0;
+	int no_fifo_space = 0;
+#ifdef DEBUG
+	u32 qspcavail;
+#endif
+
+	dev_dbg(hcd->dev, "Queue periodic transactions\n");
+#ifdef DEBUG
+	tx_status = readl(hcd->regs + HPTXSTS);
+	qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+		    TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+	fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+		    TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+	dev_dbg(hcd->dev, "  P Tx Req Queue Space Avail (before queue): %d\n",
+		qspcavail);
+	dev_dbg(hcd->dev, "  P Tx FIFO Space Avail (before queue): %d\n",
+		fspcavail);
+#endif
+
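+	/*
+	 * Queue a transaction for each QH on the periodic assigned schedule
+	 * until the request queue or the periodic Tx FIFO runs out of space
+	 */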
+	qh_ptr = hcd->periodic_sched_assigned.next;
+	while (qh_ptr != &hcd->periodic_sched_assigned) {
+		tx_status = readl(hcd->regs + HPTXSTS);
+		if ((tx_status & TXSTS_QSPCAVAIL_MASK) == 0) {
+			no_queue_space = 1;
+			break;
+		}
+
+		qh = list_entry(qh_ptr, struct dwc2_qh, qh_list_entry);
+
+		/*
+		 * Set a flag if we're queuing high-bandwidth in slave mode.
+		 * The flag prevents any halts from getting into the request
+		 * queue in the middle of multiple high-bandwidth packets
+		 * being queued.
+		 */
+		if (!hcd->dma_enable && qh->channel->multi_count > 1)
+			hcd->queuing_high_bandwidth = 1;
+
+		fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+			    TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+		status = queue_transaction(hcd, qh->channel, fspcavail);
+		if (status < 0) {
+			no_fifo_space = 1;
+			break;
+		}
+
+		/*
+		 * In Slave mode, stay on the current transfer until there is
+		 * nothing more to do or the high-bandwidth request count is
+		 * reached. In DMA mode, only need to queue one request. The
+		 * controller automatically handles multiple packets for
+		 * high-bandwidth transfers.
+		 */
+		if (hcd->dma_enable || status == 0 ||
+		    qh->channel->requests == qh->channel->multi_count) {
+			qh_ptr = qh_ptr->next;
+			/*
+			 * Move the QH from the periodic assigned schedule to
+			 * the periodic queued schedule
+			 */
+			list_move(&qh->qh_list_entry,
+				  &hcd->periodic_sched_queued);
+
+			/* done queuing high bandwidth */
+			hcd->queuing_high_bandwidth = 0;
+		}
+	}
+
+	if (!hcd->dma_enable) {
+#ifdef DEBUG
+		tx_status = readl(hcd->regs + HPTXSTS);
+		qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+			    TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+		fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+			    TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+		dev_dbg(hcd->dev,
+			"  P Tx Req Queue Space Avail (after queue): %d\n",
+			qspcavail);
+		dev_dbg(hcd->dev, "  P Tx FIFO Space Avail (after queue): %d\n",
+			fspcavail);
+#endif
+		if (!list_empty(&hcd->periodic_sched_assigned) ||
+		    no_queue_space || no_fifo_space) {
+			/*
+			 * May need to queue more transactions as the request
+			 * queue or Tx FIFO empties. Enable the periodic Tx
+			 * FIFO empty interrupt. (Always use the half-empty
+			 * level to ensure that new requests are loaded as
+			 * soon as possible.)
+			 */
+			gintmsk = readl(hcd->regs + GINTMSK);
+			gintmsk |= GINTSTS_PTxFEmp;
+			writel(gintmsk, hcd->regs + GINTMSK);
+		} else {
+			/*
+			 * Disable the Tx FIFO empty interrupt since there are
+			 * no more transactions that need to be queued right
+			 * now. This function is called from interrupt
+			 * handlers to queue more transactions as transfer
+			 * states change.
+			 */
+			gintmsk = readl(hcd->regs + GINTMSK);
+			gintmsk &= ~GINTSTS_PTxFEmp;
+			writel(gintmsk, hcd->regs + GINTMSK);
+		}
+	}
+}
+
+/*
+ * Processes active non-periodic channels and queues transactions for these
+ * channels to the DWC_otg controller. After queueing transactions, the NP Tx
+ * FIFO Empty interrupt is enabled if there are more transactions to queue as
+ * NP Tx FIFO or request queue space becomes available. Otherwise, the NP Tx
+ * FIFO Empty interrupt is disabled.
+ */
+static void process_non_periodic_channels(struct dwc2_hcd *hcd)
+{
+	struct list_head *orig_qh_ptr;
+	struct dwc2_qh *qh;
+	u32 tx_status;
+	u32 qspcavail;
+	u32 fspcavail;
+	u32 gintmsk;
+	int status;
+	int no_queue_space = 0;
+	int no_fifo_space = 0;
+	int more_to_do = 0;
+
+	dev_dbg(hcd->dev, "Queue non-periodic transactions\n");
+#ifdef DEBUG
+	tx_status = readl(hcd->regs + GNPTXSTS);
+	qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+		    TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+	fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+		    TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+	dev_dbg(hcd->dev, "  NP Tx Req Queue Space Avail (before queue): %d\n",
+		qspcavail);
+	dev_dbg(hcd->dev, "  NP Tx FIFO Space Avail (before queue): %d\n",
+		fspcavail);
+#endif
+	/*
+	 * Keep track of the starting point. Skip over the start-of-list
+	 * entry.
+	 */
+	if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active)
+		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
+	orig_qh_ptr = hcd->non_periodic_qh_ptr;
+
+	/*
+	 * Process once through the active list or until no more space is
+	 * available in the request queue or the Tx FIFO
+	 */
+	do {
+		tx_status = readl(hcd->regs + GNPTXSTS);
+		qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+			    TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+		if (!hcd->dma_enable && qspcavail == 0) {
+			no_queue_space = 1;
+			break;
+		}
+
+		qh = list_entry(hcd->non_periodic_qh_ptr, struct dwc2_qh,
+				qh_list_entry);
+		fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+			    TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+		status = queue_transaction(hcd, qh->channel, fspcavail);
+
+		if (status > 0) {
+			more_to_do = 1;
+		} else if (status < 0) {
+			no_fifo_space = 1;
+			break;
+		}
+
+		/* Advance to next QH, skipping start-of-list entry */
+		hcd->non_periodic_qh_ptr = hcd->non_periodic_qh_ptr->next;
+		if (hcd->non_periodic_qh_ptr == &hcd->non_periodic_sched_active)
+			hcd->non_periodic_qh_ptr =
+					hcd->non_periodic_qh_ptr->next;
+	} while (hcd->non_periodic_qh_ptr != orig_qh_ptr);
+
+	if (!hcd->dma_enable) {
+#ifdef DEBUG
+		tx_status = readl(hcd->regs + GNPTXSTS);
+		qspcavail = tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+			    TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT;
+		fspcavail = tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+			    TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT;
+		dev_dbg(hcd->dev,
+			"  NP Tx Req Queue Space Avail (after queue): %d\n",
+			qspcavail);
+		dev_dbg(hcd->dev,
+			"  NP Tx FIFO Space Avail (after queue): %d\n",
+			fspcavail);
+#endif
+		if (more_to_do || no_queue_space || no_fifo_space) {
+			/*
+			 * May need to queue more transactions as the request
+			 * queue or Tx FIFO empties. Enable the non-periodic
+			 * Tx FIFO empty interrupt. (Always use the half-empty
+			 * level to ensure that new requests are loaded as
+			 * soon as possible.)
+			 */
+			gintmsk = readl(hcd->regs + GINTMSK);
+			gintmsk |= GINTSTS_NPTxFEmp;
+			writel(gintmsk, hcd->regs + GINTMSK);
+		} else {
+			/*
+			 * Disable the Tx FIFO empty interrupt since there are
+			 * no more transactions that need to be queued right
+			 * now. This function is called from interrupt
+			 * handlers to queue more transactions as transfer
+			 * states change.
+			 */
+			gintmsk = readl(hcd->regs + GINTMSK);
+			gintmsk &= ~GINTSTS_NPTxFEmp;
+			writel(gintmsk, hcd->regs + GINTMSK);
+		}
+	}
+}
+
+/**
+ * dwc2_hcd_queue_transactions() - Processes the currently active host channels
+ * and queues transactions for these channels to the DWC_otg controller. Called
+ * from the HCD interrupt handler functions.
+ *
+ * @hcd:     The HCD state structure
+ * @tr_type: The type(s) of transactions to queue (non-periodic, periodic,
+ *           or both)
+ */
+void dwc2_hcd_queue_transactions(struct dwc2_hcd *hcd,
+				 enum dwc2_transaction_type tr_type)
+{
+#ifdef DEBUG_SOF
+	dev_dbg(hcd->dev, "Queue Transactions\n");
+#endif
+	/* Process host channels associated with periodic transfers */
+	if ((tr_type == DWC2_TRANSACTION_PERIODIC ||
+	     tr_type == DWC2_TRANSACTION_ALL) &&
+	    !list_empty(&hcd->periodic_sched_assigned))
+		process_periodic_channels(hcd);
+
+	/* Process host channels associated with non-periodic transfers */
+	if (tr_type == DWC2_TRANSACTION_NON_PERIODIC ||
+	    tr_type == DWC2_TRANSACTION_ALL) {
+		if (!list_empty(&hcd->non_periodic_sched_active)) {
+			process_non_periodic_channels(hcd);
+		} else {
+			/*
+			 * Ensure NP Tx FIFO empty interrupt is disabled when
+			 * there are no non-periodic transfers to process
+			 */
+			u32 gintmsk = readl(hcd->regs + GINTMSK);
+
+			gintmsk &= ~GINTSTS_NPTxFEmp;
+			writel(gintmsk, hcd->regs + GINTMSK);
+		}
+	}
+}
+
+static void port_suspend(struct dwc2_hcd *hcd, u16 windex)
+{
+	unsigned long flags;
+	u32 hprt0;
+	u32 pcgctl;
+	u32 gotgctl;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	if (dwc2_hcd_otg_port(hcd) == windex &&
+			dwc2_host_get_b_hnp_enable(hcd)) {
+		gotgctl = readl(hcd->regs + GOTGCTL);
+		gotgctl |= GOTGCTL_HSTSETHNPEN;
+		writel(gotgctl, hcd->regs + GOTGCTL);
+		hcd->op_state = A_SUSPEND;
+	}
+
+	hprt0 = dwc2_read_hprt0(hcd);
+	hprt0 |= HPRT0_SUSP;
+	writel(hprt0, hcd->regs + HPRT0);
+
+	/* Update lx_state */
+	spin_lock_irqsave(&hcd->lock, flags);
+	hcd->lx_state = DWC2_L2;
+	spin_unlock_irqrestore(&hcd->lock, flags);
+
+	/* Suspend the Phy Clock */
+	pcgctl = readl(hcd->regs + PCGCTL);
+	pcgctl |= PCGCTL_STOPPCLK;
+	writel(pcgctl, hcd->regs + PCGCTL);
+	udelay(10);
+
+	/* For HNP the bus must be suspended for at least 200ms */
+	if (dwc2_host_get_b_hnp_enable(hcd)) {
+		pcgctl = readl(hcd->regs + PCGCTL);
+		pcgctl &= ~PCGCTL_STOPPCLK;
+		writel(pcgctl, hcd->regs + PCGCTL);
+		msleep(200);
+	}
+}
+
+/* Handles hub class-specific requests */
+static int hcd_hub_control(struct dwc2_hcd *hcd, u16 typereq, u16 wvalue,
+			   u16 windex, u8 *buf, u16 wlength)
+{
+	struct usb_hub_descriptor *hub_desc;
+	int retval = 0;
+	u32 hprt0;
+	u32 port_status;
+	u32 speed;
+	u32 pcgctl;
+	u32 lpmcfg;
+	int portnum, hird, devaddr, remwake;
+	u32 time_usecs;
+	u32 gintsts;
+	u32 gintmsk;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	switch (typereq) {
+	case ClearHubFeature:
+		dev_dbg(hcd->dev, "ClearHubFeature %1xh\n", wvalue);
+
+		switch (wvalue) {
+		case C_HUB_LOCAL_POWER:
+		case C_HUB_OVER_CURRENT:
+			/* Nothing required here */
+			break;
+		default:
+			retval = -EINVAL;
+			dev_err(hcd->dev,
+				"ClearHubFeature request %1xh unknown\n",
+				wvalue);
+		}
+		break;
+	case ClearPortFeature:
+		if (wvalue != USB_PORT_FEAT_L1)
+			if (!windex || windex > 1)
+				goto error;
+
+		switch (wvalue) {
+		case USB_PORT_FEAT_ENABLE:
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_ENABLE\n");
+			hprt0 = dwc2_read_hprt0(hcd);
+			hprt0 |= HPRT0_ENA;
+			writel(hprt0, hcd->regs + HPRT0);
+			break;
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_SUSPEND\n");
+			writel(0, hcd->regs + PCGCTL);
+			msleep(20);
+
+			hprt0 = dwc2_read_hprt0(hcd);
+			hprt0 |= HPRT0_RES;
+			writel(hprt0, hcd->regs + HPRT0);
+			hprt0 &= ~HPRT0_SUSP;
+			msleep(100);
+
+			hprt0 &= ~HPRT0_RES;
+			writel(hprt0, hcd->regs + HPRT0);
+			break;
+
+		case USB_PORT_FEAT_L1:
+			lpmcfg = readl(hcd->regs + GLPMCFG);
+			lpmcfg &= ~(GLPMCFG_EN_UTMI_SLEEP |
+				    GLPMCFG_HIRD_THRES_EN);
+			lpmcfg |= GLPMCFG_PRT_SLEEP_STS;
+			writel(lpmcfg, hcd->regs + GLPMCFG);
+
+			/* Clear Enbl_L1Gating bit */
+			pcgctl = readl(hcd->regs + PCGCTL);
+			pcgctl &= ~PCGCTL_ENBL_SLEEP_GATING;
+			writel(pcgctl, hcd->regs + PCGCTL);
+			msleep(20);
+
+			hprt0 = dwc2_read_hprt0(hcd);
+			hprt0 |= HPRT0_RES;
+			writel(hprt0, hcd->regs + HPRT0);
+			/* This bit will be cleared in wakeup intr handler */
+			break;
+
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_POWER\n");
+			hprt0 = dwc2_read_hprt0(hcd);
+			hprt0 &= ~HPRT0_PWR;
+			writel(hprt0, hcd->regs + HPRT0);
+			break;
+		case USB_PORT_FEAT_INDICATOR:
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_INDICATOR\n");
+			/* Port indicator not supported */
+			break;
+		case USB_PORT_FEAT_C_CONNECTION:
+			/*
+			 * Clears driver's internal Connect Status Change flag
+			 */
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_C_CONNECTION\n");
+			hcd->flags.b.port_connect_status_change = 0;
+			break;
+		case USB_PORT_FEAT_C_RESET:
+			/* Clears driver's internal Port Reset Change flag */
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_C_RESET\n");
+			hcd->flags.b.port_reset_change = 0;
+			break;
+		case USB_PORT_FEAT_C_ENABLE:
+			/*
+			 * Clears the driver's internal Port Enable/Disable
+			 * Change flag
+			 */
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_C_ENABLE\n");
+			hcd->flags.b.port_enable_change = 0;
+			break;
+		case USB_PORT_FEAT_C_SUSPEND:
+			/*
+			 * Clears the driver's internal Port Suspend Change
+			 * flag, which is set when resume signaling on the host
+			 * port is complete
+			 */
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_C_SUSPEND\n");
+			hcd->flags.b.port_suspend_change = 0;
+			break;
+
+		case USB_PORT_FEAT_C_PORT_L1:
+			hcd->flags.b.port_l1_change = 0;
+			break;
+
+		case USB_PORT_FEAT_C_OVER_CURRENT:
+			dev_dbg(hcd->dev,
+				"ClearPortFeature USB_PORT_FEAT_C_OVER_CURRENT\n");
+			hcd->flags.b.port_over_current_change = 0;
+			break;
+		default:
+			retval = -EINVAL;
+			dev_err(hcd->dev,
+				"ClearPortFeature request %1xh unknown or unsupported\n",
+				wvalue);
+		}
+		break;
+	case GetHubDescriptor:
+		dev_dbg(hcd->dev, "GetHubDescriptor\n");
+		hub_desc = (struct usb_hub_descriptor *)buf;
+		hub_desc->bDescLength = 9;
+		hub_desc->bDescriptorType = 0x29;	/* USB_DT_HUB */
+		hub_desc->bNbrPorts = 1;
+		hub_desc->wHubCharacteristics = cpu_to_le16(0x08);
+		hub_desc->bPwrOn2PwrGood = 1;
+		hub_desc->bHubContrCurrent = 0;
+		hub_desc->u.hs.DeviceRemovable[0] = 0;
+		hub_desc->u.hs.DeviceRemovable[1] = 0xff;
+		break;
+	case GetHubStatus:
+		dev_dbg(hcd->dev, "GetHubStatus\n");
+		memset(buf, 0, 4);
+		break;
+	case GetPortStatus:
+		dev_dbg(hcd->dev, "GetPortStatus wIndex=0x%04x flags=0x%08x\n",
+			windex, hcd->flags.d32);
+		if (!windex || windex > 1)
+			goto error;
+
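+		/*
+		 * Build the change bits (the upper word of the port status)
+		 * from the driver's internal flags, then fill in the current
+		 * status bits from HPRT0 below
+		 */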
+		port_status = 0;
+
+		if (hcd->flags.b.port_connect_status_change)
+			port_status |= USB_PORT_STAT_C_CONNECTION << 16;
+
+		if (hcd->flags.b.port_enable_change)
+			port_status |= USB_PORT_STAT_C_ENABLE << 16;
+
+		if (hcd->flags.b.port_suspend_change)
+			port_status |= USB_PORT_STAT_C_SUSPEND << 16;
+
+		if (hcd->flags.b.port_l1_change)
+			port_status |= USB_PORT_STAT_C_L1 << 16;
+
+		if (hcd->flags.b.port_reset_change)
+			port_status |= USB_PORT_STAT_C_RESET << 16;
+
+		if (hcd->flags.b.port_over_current_change) {
+			dev_warn(hcd->dev, "Overcurrent change detected\n");
+			port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
+		}
+
+		if (!hcd->flags.b.port_connect_status) {
+			/*
+			 * The port is disconnected, which means the core is
+			 * either in device mode or it soon will be. Just
+			 * return 0's for the remainder of the port status
+			 * since the port register can't be read if the core
+			 * is in device mode.
+			 */
+			*(__le32 *)buf = cpu_to_le32(port_status);
+			break;
+		}
+
+		hprt0 = readl(hcd->regs + HPRT0);
+		dev_dbg(hcd->dev, "  HPRT0: 0x%08x\n", hprt0);
+
+		if (hprt0 & HPRT0_CONNSTS)
+			port_status |= USB_PORT_STAT_CONNECTION;
+
+		if (hprt0 & HPRT0_ENA)
+			port_status |= USB_PORT_STAT_ENABLE;
+
+		if (hprt0 & HPRT0_SUSP)
+			port_status |= USB_PORT_STAT_SUSPEND;
+
+		if (hprt0 & HPRT0_OVRCURRACT)
+			port_status |= USB_PORT_STAT_OVERCURRENT;
+
+		if (hprt0 & HPRT0_RST)
+			port_status |= USB_PORT_STAT_RESET;
+
+		if (hprt0 & HPRT0_PWR)
+			port_status |= USB_PORT_STAT_POWER;
+
+		speed = hprt0 & HPRT0_SPD_MASK;
+		if (speed == HPRT0_SPD_HIGH_SPEED)
+			port_status |= USB_PORT_STAT_HIGH_SPEED;
+		else if (speed == HPRT0_SPD_LOW_SPEED)
+			port_status |= USB_PORT_STAT_LOW_SPEED;
+
+		if (hprt0 & HPRT0_TSTCTL_MASK)
+			port_status |= USB_PORT_STAT_TEST;
+		if (dwc2_get_lpm_portsleepstatus(hcd))
+			port_status |= USB_PORT_STAT_L1;
+		/* USB_PORT_FEAT_INDICATOR unsupported, always 0 */
+
+		dev_dbg(hcd->dev, "port_status=%08x\n", port_status);
+		*(__le32 *)buf = cpu_to_le32(port_status);
+		break;
+	case SetHubFeature:
+		dev_dbg(hcd->dev, "SetHubFeature\n");
+		/* No HUB features supported */
+		break;
+	case SetPortFeature:
+		if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
+			goto error;
+
+		if (!hcd->flags.b.port_connect_status) {
+			/*
+			 * The port is disconnected, which means the core is
+			 * either in device mode or it soon will be. Just
+			 * return without doing anything since the port
+			 * register can't be written if the core is in device
+			 * mode.
+			 */
+			break;
+		}
+
+		switch (wvalue) {
+		case USB_PORT_FEAT_SUSPEND:
+			dev_dbg(hcd->dev,
+				"SetPortFeature - USB_PORT_FEAT_SUSPEND\n");
+			if (dwc2_hcd_otg_port(hcd) != windex)
+				goto error;
+			port_suspend(hcd, windex);
+			break;
+		case USB_PORT_FEAT_POWER:
+			dev_dbg(hcd->dev,
+				"SetPortFeature - USB_PORT_FEAT_POWER\n");
+			hprt0 = dwc2_read_hprt0(hcd);
+			hprt0 |= HPRT0_PWR;
+			writel(hprt0, hcd->regs + HPRT0);
+			break;
+		case USB_PORT_FEAT_RESET:
+			hprt0 = dwc2_read_hprt0(hcd);
+			dev_dbg(hcd->dev,
+				"SetPortFeature - USB_PORT_FEAT_RESET\n");
+			pcgctl = readl(hcd->regs + PCGCTL);
+			pcgctl &= ~(PCGCTL_ENBL_SLEEP_GATING | PCGCTL_STOPPCLK);
+			writel(pcgctl, hcd->regs + PCGCTL);
+			/* ??? Original driver does this */
+			writel(0, hcd->regs + PCGCTL);
+
+			lpmcfg = readl(hcd->regs + GLPMCFG);
+			if (lpmcfg & GLPMCFG_PRT_SLEEP_STS) {
+				lpmcfg &= ~(GLPMCFG_EN_UTMI_SLEEP |
+					    GLPMCFG_HIRD_THRES_EN);
+				writel(lpmcfg, hcd->regs + GLPMCFG);
+				msleep(20);
+			}
+
+			hprt0 = dwc2_read_hprt0(hcd);
+			/* Clear suspend bit if resetting from suspend state */
+			hprt0 &= ~HPRT0_SUSP;
+			/*
+			 * When acting as B-Host, the port reset bit is set
+			 * in the HCD start callback function, so that the
+			 * reset is started within 1 ms of the HNP success
+			 * interrupt
+			 */
+			if (!dwc2_hcd_is_b_host(hcd)) {
+				hprt0 |= HPRT0_PWR | HPRT0_RST;
+				dev_dbg(hcd->dev, "In host mode, hprt0=%08x\n",
+					hprt0);
+				writel(hprt0, hcd->regs + HPRT0);
+			}
+			/* Clear reset bit in 10ms (FS/LS) or 50ms (HS) */
+			msleep(60);
+			hprt0 &= ~HPRT0_RST;
+			writel(hprt0, hcd->regs + HPRT0);
+			hcd->lx_state = DWC2_L0; /* Now back to On state */
+			break;
+		case USB_PORT_FEAT_INDICATOR:
+			dev_dbg(hcd->dev,
+				"SetPortFeature - USB_PORT_FEAT_INDICATOR\n");
+			/* Not supported */
+			break;
+		default:
+			retval = -EINVAL;
+			dev_err(hcd->dev,
+				"SetPortFeature %1xh unknown or unsupported\n",
+				wvalue);
+			break;
+		}
+		break;
+
+	case 0xa30c:	/* USB_REQ_TEST_AND_SET_PORT_FEATURE */
+		if (wvalue != USB_PORT_FEAT_L1 || wlength != 1)
+			goto error;
+		if (hcd->core_params->lpm_enable <= 0)
+			goto error;
+
+		/* Check if the port is currently in the SLEEP state */
+		lpmcfg = readl(hcd->regs + GLPMCFG);
+		if (lpmcfg & GLPMCFG_PRT_SLEEP_STS) {
+			dev_info(hcd->dev, "Port is already in sleep mode\n");
+			buf[0] = 0;	/* Return success */
+			break;
+		}
+
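+		/*
+		 * Decode the LPM parameters packed into wIndex: port number
+		 * in bits 3:0, HIRD in bits 7:4, device address in bits
+		 * 14:8, and remote-wakeup enable in bit 15
+		 */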
+		portnum = windex & 0xf;
+		hird = windex >> 4 & 0xf;
+		devaddr = windex >> 8 & 0x7f;
+		remwake = windex >> 15;
+
+		if (portnum != 1) {
+			retval = -EINVAL;
+			dev_warn(hcd->dev,
+				 "Wrong port number(%d) in SetandTestPortFeature request\n",
+				 portnum);
+			break;
+		}
+
+		dev_dbg(hcd->dev,
+			"SetandTestPortFeature: portnum=%d, hird=%d, devaddr=%d, remwake=%d\n",
+			portnum, hird, devaddr, remwake);
+		/* Disable LPM interrupt */
+		gintmsk = readl(hcd->regs + GINTMSK);
+		gintmsk &= ~GINTSTS_LPMTranRcvd;
+		writel(gintmsk, hcd->regs + GINTMSK);
+
+		if (hcd_send_lpm(hcd, devaddr, hird, remwake)) {
+			retval = -EINVAL;
+			break;
+		}
+
+		time_usecs = 10 * ((lpmcfg >> GLPMCFG_RETRY_COUNT_SHIFT &
+			GLPMCFG_RETRY_COUNT_MASK >> GLPMCFG_RETRY_COUNT_SHIFT) +
+			1);
+
+		/*
+		 * Consider it a timeout if time_usecs microseconds pass
+		 * without the LPM transaction status being received. The
+		 * core sets the lpmtranrcvd bit after receiving a non-error
+		 * response (ACK/NYET/STALL) from the device.
+		 */
+		do {
+			gintsts = readl(hcd->regs + GINTSTS);
+			if (gintsts & GINTSTS_LPMTranRcvd)
+				break;
+			udelay(1);
+		} while (--time_usecs);
+		/* lpm_int bit will be cleared in LPM interrupt handler */
+
+		/*
+		 * Now fill status:
+		 * 0x00 - Success
+		 * 0x02 - NYET
+		 * 0x03 - Timeout
+		 */
+		if (!(gintsts & GINTSTS_LPMTranRcvd)) {
+			buf[0] = 0x3;	/* Completion code is Timeout */
+			hcd_free_hc_from_lpm(hcd);
+		} else {
+			lpmcfg = readl(hcd->regs + GLPMCFG);
+			if ((lpmcfg & GLPMCFG_LPM_RESP_MASK) ==
+			    3 << GLPMCFG_LPM_RESP_SHIFT)
+				/* ACK response from the device */
+				buf[0] = 0;	/* Success */
+			else if ((lpmcfg & GLPMCFG_LPM_RESP_MASK) ==
+				 2 << GLPMCFG_LPM_RESP_SHIFT)
+				/* NYET response from the device */
+				buf[0] = 2;
+			else
+				/* Otherwise respond with Timeout */
+				buf[0] = 3;
+		}
+		dev_dbg(hcd->dev, "Device response to LPM trans is %1xh\n",
+			lpmcfg >> GLPMCFG_LPM_RESP_SHIFT &
+			GLPMCFG_LPM_RESP_MASK >> GLPMCFG_LPM_RESP_SHIFT);
+		gintmsk = readl(hcd->regs + GINTMSK);
+		gintmsk |= GINTSTS_LPMTranRcvd;
+		writel(gintmsk, hcd->regs + GINTMSK);
+		break;
+
+	default:
+error:
+		retval = -EINVAL;
+		dev_warn(hcd->dev,
+			 "Unknown hub control request: %1xh wIndex: %1xh wValue: %1xh\n",
+			 typereq, windex, wvalue);
+		break;
+	}
+
+	return retval;
+}
+
+int dwc2_hcd_is_status_changed(struct dwc2_hcd *hcd, int port)
+{
+	int retval;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	if (port != 1)
+		return -EINVAL;
+
+	retval = (hcd->flags.b.port_connect_status_change ||
+		  hcd->flags.b.port_reset_change ||
+		  hcd->flags.b.port_enable_change ||
+		  hcd->flags.b.port_suspend_change ||
+		  hcd->flags.b.port_over_current_change);
+#ifdef DEBUG
+	if (retval) {
+		dev_dbg(hcd->dev,
+			"DWC OTG HCD HUB STATUS DATA: Root port status changed\n");
+		dev_dbg(hcd->dev, "  port_connect_status_change: %d\n",
+			hcd->flags.b.port_connect_status_change);
+		dev_dbg(hcd->dev, "  port_reset_change: %d\n",
+			hcd->flags.b.port_reset_change);
+		dev_dbg(hcd->dev, "  port_enable_change: %d\n",
+			hcd->flags.b.port_enable_change);
+		dev_dbg(hcd->dev, "  port_suspend_change: %d\n",
+			hcd->flags.b.port_suspend_change);
+		dev_dbg(hcd->dev, "  port_over_current_change: %d\n",
+			hcd->flags.b.port_over_current_change);
+	}
+#endif
+	return retval;
+}
+
+int dwc2_hcd_get_frame_number(struct dwc2_hcd *hcd)
+{
+	u32 hfnum = readl(hcd->regs + HFNUM);
+
+#ifdef DEBUG_SOF
+	dev_dbg(hcd->dev, "DWC OTG HCD GET FRAME NUMBER %d\n",
+		hfnum >> HFNUM_FRNUM_SHIFT &
+		HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT);
+#endif
+	return hfnum >> HFNUM_FRNUM_SHIFT &
+	       HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
+}
+
+int dwc2_hcd_startup(struct dwc2_hcd *hcd)
+{
+	if (dwc2_is_device_mode(hcd))
+		return -ENODEV;
+
+	hcd_reinit(hcd);
+	return 0;
+}
+
+struct usb_hcd *dwc2_hcd_get_priv_data(struct dwc2_hcd *hcd)
+{
+	return (struct usb_hcd *)hcd->priv;
+}
+
+void dwc2_hcd_set_priv_data(struct dwc2_hcd *hcd, struct usb_hcd *priv_data)
+{
+	hcd->priv = priv_data;
+}
+
+u32 dwc2_hcd_otg_port(struct dwc2_hcd *hcd)
+{
+	return hcd->otg_port;
+}
+
+int dwc2_hcd_is_b_host(struct dwc2_hcd *hcd)
+{
+	return (hcd->op_state == B_HOST);
+}
+
+static struct dwc2_hcd_urb *hcd_urb_alloc(struct dwc2_hcd *hcd,
+					  int iso_desc_count, gfp_t mem_flags)
+{
+	struct dwc2_hcd_urb *urb;
+	u32 size = sizeof(*urb) + iso_desc_count *
+		   sizeof(struct dwc2_hcd_iso_packet_desc);
+
+	urb = kzalloc(size, mem_flags);
+	if (urb)
+		urb->packet_count = iso_desc_count;
+
+	return urb;
+}
+
+static void hcd_fill_pipe(struct dwc2_hcd_pipe_info *pipe, u8 devaddr,
+			  u8 ep_num, u8 pipe_type, u8 pipe_dir, u16 mps)
+{
+	pipe->dev_addr = devaddr;
+	pipe->ep_num = ep_num;
+	pipe->pipe_type = pipe_type;
+	pipe->pipe_dir = pipe_dir;
+	pipe->mps = mps;
+}
+
+static void hcd_urb_set_pipeinfo(struct dwc2_hcd *hcd, struct dwc2_hcd_urb *urb,
+				 u8 dev_addr, u8 ep_num, u8 ep_type, u8 ep_dir,
+				 u16 mps)
+{
+	hcd_fill_pipe(&urb->pipe_info, dev_addr, ep_num, ep_type, ep_dir, mps);
+
+#ifdef VERBOSE
+	dev_info(hcd->dev,
+		 "addr=%d, ep_num=%d, ep_dir=%1x, ep_type=%1x, mps=%d\n",
+		 dev_addr, ep_num, ep_dir, ep_type, mps);
+#endif
+}
+
+static void hcd_urb_set_params(struct dwc2_hcd_urb *urb, void *context,
+			       void *buf, dma_addr_t dma, u32 buflen,
+			       void *setup_packet, dma_addr_t setup_dma,
+			       u32 flags, u16 interval)
+{
+	urb->priv = context;
+	urb->buf = buf;
+	urb->dma = dma;
+	urb->length = buflen;
+	urb->setup_packet = setup_packet;
+	urb->setup_dma = setup_dma;
+	urb->flags = flags;
+	urb->interval = interval;
+	urb->status = -EINPROGRESS;
+}
+
+void dwc2_hcd_dump_state(struct dwc2_hcd *hcd)
+{
+#ifdef DEBUG
+	struct dwc2_hc *hc;
+	struct dwc2_qtd *qtd;
+	struct dwc2_hcd_urb *urb;
+	struct list_head *qtd_item;
+	u32 np_tx_status;
+	u32 p_tx_status;
+	int num_channels;
+	int i;
+
+	num_channels = hcd->core_params->host_channels;
+	dev_dbg(hcd->dev, "\n");
+	dev_dbg(hcd->dev,
+		"************************************************************\n");
+	dev_dbg(hcd->dev, "HCD State:\n");
+	dev_dbg(hcd->dev, "  Num channels: %d\n", num_channels);
+
+	for (i = 0; i < num_channels; i++) {
+		hc = hcd->hc_ptr_array[i];
+		dev_dbg(hcd->dev, "  Channel %d:\n", i);
+		dev_dbg(hcd->dev,
+			"    dev_addr: %d, ep_num: %d, ep_is_in: %d\n",
+			hc->dev_addr, hc->ep_num, hc->ep_is_in);
+		dev_dbg(hcd->dev, "    speed: %d\n", hc->speed);
+		dev_dbg(hcd->dev, "    ep_type: %d\n", hc->ep_type);
+		dev_dbg(hcd->dev, "    max_packet: %d\n", hc->max_packet);
+		dev_dbg(hcd->dev, "    data_pid_start: %d\n",
+			hc->data_pid_start);
+		dev_dbg(hcd->dev, "    multi_count: %d\n", hc->multi_count);
+		dev_dbg(hcd->dev, "    xfer_started: %d\n", hc->xfer_started);
+		dev_dbg(hcd->dev, "    xfer_buff: %p\n", hc->xfer_buff);
+		dev_dbg(hcd->dev, "    xfer_len: %d\n", hc->xfer_len);
+		dev_dbg(hcd->dev, "    xfer_count: %d\n", hc->xfer_count);
+		dev_dbg(hcd->dev, "    halt_on_queue: %d\n", hc->halt_on_queue);
+		dev_dbg(hcd->dev, "    halt_pending: %d\n", hc->halt_pending);
+		dev_dbg(hcd->dev, "    halt_status: %d\n", hc->halt_status);
+		dev_dbg(hcd->dev, "    do_split: %d\n", hc->do_split);
+		dev_dbg(hcd->dev, "    complete_split: %d\n",
+			hc->complete_split);
+		dev_dbg(hcd->dev, "    hub_addr: %d\n", hc->hub_addr);
+		dev_dbg(hcd->dev, "    port_addr: %d\n", hc->port_addr);
+		dev_dbg(hcd->dev, "    xact_pos: %d\n", hc->xact_pos);
+		dev_dbg(hcd->dev, "    requests: %d\n", hc->requests);
+		dev_dbg(hcd->dev, "    qh: %p\n", hc->qh);
+
+		if (hc->xfer_started) {
+			u32 hfnum, hcchar, hctsiz, hcint, hcintmsk;
+
+			hfnum = readl(hcd->regs + HFNUM);
+			hcchar = readl(hcd->regs + HCCHAR(i));
+			hctsiz = readl(hcd->regs + HCTSIZ(i));
+			hcint = readl(hcd->regs + HCINT(i));
+			hcintmsk = readl(hcd->regs + HCINTMSK(i));
+			dev_dbg(hcd->dev, "    hfnum: 0x%08x\n", hfnum);
+			dev_dbg(hcd->dev, "    hcchar: 0x%08x\n", hcchar);
+			dev_dbg(hcd->dev, "    hctsiz: 0x%08x\n", hctsiz);
+			dev_dbg(hcd->dev, "    hcint: 0x%08x\n", hcint);
+			dev_dbg(hcd->dev, "    hcintmsk: 0x%08x\n", hcintmsk);
+		}
+
+		if (!(hc->xfer_started && hc->qh))
+			continue;
+
+		list_for_each(qtd_item, &hc->qh->qtd_list) {
+			qtd = list_entry(qtd_item, struct dwc2_qtd,
+					 qtd_list_entry);
+			if (!qtd->in_process)
+				break;
+			urb = qtd->urb;
+			dev_dbg(hcd->dev, "    URB Info:\n");
+			dev_dbg(hcd->dev, "      qtd: %p, urb: %p\n", qtd, urb);
+			if (urb) {
+				dev_dbg(hcd->dev, "      Dev: %d, EP: %d %s\n",
+					dwc2_hcd_get_dev_addr(&urb->pipe_info),
+					dwc2_hcd_get_ep_num(&urb->pipe_info),
+					dwc2_hcd_is_pipe_in(&urb->pipe_info) ?
+					"IN" : "OUT");
+				dev_dbg(hcd->dev, "      Max packet size: %d\n",
+					dwc2_hcd_get_mps(&urb->pipe_info));
+				dev_dbg(hcd->dev, "      transfer_buffer: %p\n",
+					urb->buf);
+				dev_dbg(hcd->dev, "      transfer_dma: %p\n",
+					(void *)urb->dma);
+				dev_dbg(hcd->dev,
+					"      transfer_buffer_length: %d\n",
+					urb->length);
+				dev_dbg(hcd->dev, "      actual_length: %d\n",
+					urb->actual_length);
+			}
+		}
+	}
+
+	dev_dbg(hcd->dev, "  non_periodic_channels: %d\n",
+		hcd->non_periodic_channels);
+	dev_dbg(hcd->dev, "  periodic_channels: %d\n", hcd->periodic_channels);
+	dev_dbg(hcd->dev, "  periodic_usecs: %d\n", hcd->periodic_usecs);
+	np_tx_status = readl(hcd->regs + GNPTXSTS);
+	dev_dbg(hcd->dev, "  NP Tx Req Queue Space Avail: %d\n",
+		np_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+		TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+	dev_dbg(hcd->dev, "  NP Tx FIFO Space Avail: %d\n",
+		np_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+		TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+	p_tx_status = readl(hcd->regs + HPTXSTS);
+	dev_dbg(hcd->dev, "  P Tx Req Queue Space Avail: %d\n",
+		p_tx_status >> TXSTS_QSPCAVAIL_SHIFT &
+		TXSTS_QSPCAVAIL_MASK >> TXSTS_QSPCAVAIL_SHIFT);
+	dev_dbg(hcd->dev, "  P Tx FIFO Space Avail: %d\n",
+		p_tx_status >> TXSTS_FSPCAVAIL_SHIFT &
+		TXSTS_FSPCAVAIL_MASK >> TXSTS_FSPCAVAIL_SHIFT);
+	dwc2_hcd_dump_frrem(hcd);
+	dwc2_dump_global_registers(hcd);
+	dwc2_dump_host_registers(hcd);
+	dev_dbg(hcd->dev,
+		"************************************************************\n");
+	dev_dbg(hcd->dev, "\n");
+#endif
+}
+
+void dwc2_hcd_dump_frrem(struct dwc2_hcd *hcd)
+{
+#if 0
+	dev_dbg(hcd->dev, "Frame remaining at SOF:\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->frrem_samples, hcd->frrem_accum,
+		hcd->frrem_samples > 0 ?
+		hcd->frrem_accum / hcd->frrem_samples : 0);
+
+	dev_dbg(hcd->dev, "\n");
+	dev_dbg(hcd->dev, "Frame remaining at start_transfer (uframe 7):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_7_samples,
+		hcd->hfnum_7_frrem_accum,
+		hcd->hfnum_7_samples > 0 ?
+		hcd->hfnum_7_frrem_accum / hcd->hfnum_7_samples : 0);
+	dev_dbg(hcd->dev, "Frame remaining at start_transfer (uframe 0):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_0_samples,
+		hcd->hfnum_0_frrem_accum,
+		hcd->hfnum_0_samples > 0 ?
+		hcd->hfnum_0_frrem_accum / hcd->hfnum_0_samples : 0);
+	dev_dbg(hcd->dev, "Frame remaining at start_transfer (uframe 1-6):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_other_samples,
+		hcd->hfnum_other_frrem_accum,
+		hcd->hfnum_other_samples > 0 ?
+		hcd->hfnum_other_frrem_accum / hcd->hfnum_other_samples : 0);
+
+	dev_dbg(hcd->dev, "\n");
+	dev_dbg(hcd->dev, "Frame remaining at sample point A (uframe 7):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_7_samples_a, hcd->hfnum_7_frrem_accum_a,
+		hcd->hfnum_7_samples_a > 0 ?
+		hcd->hfnum_7_frrem_accum_a / hcd->hfnum_7_samples_a : 0);
+	dev_dbg(hcd->dev, "Frame remaining at sample point A (uframe 0):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_0_samples_a, hcd->hfnum_0_frrem_accum_a,
+		hcd->hfnum_0_samples_a > 0 ?
+		hcd->hfnum_0_frrem_accum_a / hcd->hfnum_0_samples_a : 0);
+	dev_dbg(hcd->dev, "Frame remaining at sample point A (uframe 1-6):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_other_samples_a, hcd->hfnum_other_frrem_accum_a,
+		hcd->hfnum_other_samples_a > 0 ?
+		hcd->hfnum_other_frrem_accum_a / hcd->hfnum_other_samples_a :
+		0);
+
+	dev_dbg(hcd->dev, "\n");
+	dev_dbg(hcd->dev, "Frame remaining at sample point B (uframe 7):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_7_samples_b, hcd->hfnum_7_frrem_accum_b,
+		hcd->hfnum_7_samples_b > 0 ?
+		hcd->hfnum_7_frrem_accum_b / hcd->hfnum_7_samples_b : 0);
+	dev_dbg(hcd->dev, "Frame remaining at sample point B (uframe 0):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_0_samples_b, hcd->hfnum_0_frrem_accum_b,
+		(hcd->hfnum_0_samples_b > 0) ?
+		hcd->hfnum_0_frrem_accum_b / hcd->hfnum_0_samples_b : 0);
+	dev_dbg(hcd->dev, "Frame remaining at sample point B (uframe 1-6):\n");
+	dev_dbg(hcd->dev, "  samples %u, accum %llu, avg %llu\n",
+		hcd->hfnum_other_samples_b, hcd->hfnum_other_frrem_accum_b,
+		(hcd->hfnum_other_samples_b > 0) ?
+		hcd->hfnum_other_frrem_accum_b / hcd->hfnum_other_samples_b :
+		0);
+#endif
+}
+
+/*
+ * Gets the endpoint number from a bEndpointAddress argument. The endpoint is
+ * qualified with its direction, allowing for up to 32 endpoints per device
+ * (16 IN and 16 OUT).
+ */
+#define dwc2_ep_addr_to_endpoint(_bendpointaddress_)		\
+	(((_bendpointaddress_) & USB_ENDPOINT_NUMBER_MASK) |	\
+	 (((_bendpointaddress_) & USB_DIR_IN) != 0) << 4)
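+/*
+ * Illustrative example (values assumed, not taken from the databook): a
+ * bEndpointAddress of 0x81 (IN endpoint 1) maps to index 0x11, while 0x01
+ * (OUT endpoint 1) maps to 0x01, so the IN and OUT halves of the same
+ * endpoint number get distinct indices.
+ */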
+
+struct wrapper_priv_data {
+	struct dwc2_hcd *dwc2_hcd;
+};
+
+/* Gets the dwc2_hcd from a struct usb_hcd */
+static struct dwc2_hcd *hcd_to_dwc2_hcd(struct usb_hcd *hcd)
+{
+	struct wrapper_priv_data *p;
+
+	p = (struct wrapper_priv_data *) &hcd->hcd_priv;
+	return p->dwc2_hcd;
+}
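+/*
+ * A sketch of the wrapping scheme used here: usb_create_hcd() reserves
+ * hcd_priv_size (sizeof(struct wrapper_priv_data)) bytes at the end of
+ * struct usb_hcd, dwc2_hcd_init() stores the dwc2_hcd pointer there, and
+ * dwc2_hcd_set_priv_data() stores the usb_hcd pointer in the dwc2_hcd, so
+ * either structure can be recovered from the other.
+ */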
+
+/* Gets the struct usb_hcd that contains a struct dwc2_hcd */
+static struct usb_hcd *dwc2_hcd_to_hcd(struct dwc2_hcd *dwc2_hcd)
+{
+	return dwc2_hcd_get_priv_data(dwc2_hcd);
+}
+
+#if 0
+/* Gets the usb_host_endpoint associated with an URB */
+static struct usb_host_endpoint *urb_to_endpoint(struct urb *urb)
+{
+	struct usb_device *dev = urb->dev;
+	int ep_num = usb_pipeendpoint(urb->pipe);
+
+	if (usb_pipein(urb->pipe))
+		return dev->ep_in[ep_num];
+	else
+		return dev->ep_out[ep_num];
+}
+#endif
+
+static int hcd_start(struct usb_hcd *hcd);
+
+void dwc2_host_start(struct dwc2_hcd *hcd)
+{
+	struct usb_hcd *usb_hcd = dwc2_hcd_to_hcd(hcd);
+
+	usb_hcd->self.is_b_host = dwc2_hcd_is_b_host(hcd);
+	hcd_start(usb_hcd);
+}
+
+void dwc2_host_disconnect(struct dwc2_hcd *hcd)
+{
+	struct usb_hcd *usb_hcd = dwc2_hcd_to_hcd(hcd);
+
+	usb_hcd->self.is_b_host = 0;
+}
+
+void dwc2_host_hub_info(struct dwc2_hcd *hcd, void *context, u32 *hub_addr,
+			u32 *port_addr)
+{
+	struct urb *urb = context;
+
+	if (urb->dev->tt)
+		*hub_addr = urb->dev->tt->hub->devnum;
+	else
+		*hub_addr = 0;
+
+	*port_addr = urb->dev->ttport;
+}
+
+int dwc2_host_speed(struct dwc2_hcd *hcd, void *context)
+{
+	struct urb *urb = context;
+
+	return urb->dev->speed;
+}
+
+int dwc2_host_get_b_hnp_enable(struct dwc2_hcd *hcd)
+{
+	struct usb_hcd *usb_hcd = dwc2_hcd_to_hcd(hcd);
+
+	return usb_hcd->self.b_hnp_enable;
+}
+
+static void allocate_bus_bandwidth(struct usb_hcd *hcd, u32 bw, struct urb *urb)
+{
+	if (urb->interval)
+		hcd_to_bus(hcd)->bandwidth_allocated += bw / urb->interval;
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+		hcd_to_bus(hcd)->bandwidth_isoc_reqs++;
+	else
+		hcd_to_bus(hcd)->bandwidth_int_reqs++;
+}
+
+static void free_bus_bandwidth(struct usb_hcd *hcd, u32 bw, struct urb *urb)
+{
+	if (urb->interval)
+		hcd_to_bus(hcd)->bandwidth_allocated -= bw / urb->interval;
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+		hcd_to_bus(hcd)->bandwidth_isoc_reqs--;
+	else
+		hcd_to_bus(hcd)->bandwidth_int_reqs--;
+}
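+/*
+ * Worked example (numbers purely illustrative): an interrupt URB claiming
+ * bw = 96 microseconds with interval = 8 (micro)frames adds 96 / 8 = 12 to
+ * bandwidth_allocated in allocate_bus_bandwidth(), and free_bus_bandwidth()
+ * subtracts the same amount when the endpoint's bandwidth is released.
+ */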
+
+/*
+ * Sets the final status of an URB and returns it to the device driver. Any
+ * required cleanup of the URB is performed.
+ */
+void dwc2_host_complete(struct dwc2_hcd *hcd, void *context,
+			struct dwc2_hcd_urb *dwc2_urb, int status)
+{
+	struct urb *urb = context;
+
+	urb->actual_length = dwc2_hcd_urb_get_actual_length(dwc2_urb);
+
+#ifdef DEBUG
+	dev_info(hcd->dev,
+		 "%s: urb %p device %d ep %d-%s status %d actual %d\n",
+		 __func__, urb, usb_pipedevice(urb->pipe),
+		 usb_pipeendpoint(urb->pipe),
+		 usb_pipein(urb->pipe) ? "IN" : "OUT", status,
+		 urb->actual_length);
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		int i;
+
+		for (i = 0; i < urb->number_of_packets; i++)
+			dev_info(hcd->dev, " ISO Desc %d status %d\n",
+				 i, urb->iso_frame_desc[i].status);
+	}
+#endif
+
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		int i;
+
+		urb->error_count = dwc2_hcd_urb_get_error_count(dwc2_urb);
+		for (i = 0; i < urb->number_of_packets; ++i) {
+			urb->iso_frame_desc[i].actual_length =
+				dwc2_hcd_urb_get_iso_desc_actual_length(
+						dwc2_urb, i);
+			urb->iso_frame_desc[i].status =
+				dwc2_hcd_urb_get_iso_desc_status(dwc2_urb, i);
+		}
+	}
+
+	urb->status = status;
+	urb->hcpriv = NULL;
+	if (!status) {
+		if ((urb->transfer_flags & URB_SHORT_NOT_OK) &&
+		    urb->actual_length < urb->transfer_buffer_length)
+			urb->status = -EREMOTEIO;
+	}
+
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
+	    usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+#if 0
+		struct usb_host_endpoint *ep = urb_to_endpoint(urb);
+#else
+		struct usb_host_endpoint *ep = urb->ep;
+#endif
+		if (ep)
+			free_bus_bandwidth(dwc2_hcd_to_hcd(hcd),
+				dwc2_hcd_get_ep_bandwidth(hcd, ep),
+				urb);
+	}
+
+	kfree(dwc2_urb);
+
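+	/*
+	 * Release the driver lock across the giveback: the completion
+	 * handler may resubmit URBs and re-enter this HCD through
+	 * urb_enqueue() before returning.
+	 */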
+	spin_unlock(&hcd->lock);
+	usb_hcd_giveback_urb(dwc2_hcd_to_hcd(hcd), urb, status);
+	spin_lock(&hcd->lock);
+}
+
+/*
+ * Work queue function for starting the HCD when A-Cable is connected
+ */
+static void hcd_start_func(struct work_struct *work)
+{
+	struct dwc2_hcd *hcd = container_of(work, struct dwc2_hcd,
+					    start_work.work);
+
+	dev_dbg(hcd->dev, "%s() %p\n", __func__, hcd);
+	dwc2_host_start(hcd);
+}
+
+/*
+ * Reset work queue function
+ */
+static void hcd_reset_func(struct work_struct *work)
+{
+	struct dwc2_hcd *hcd = container_of(work, struct dwc2_hcd,
+					    reset_work.work);
+	u32 hprt0;
+
+	dev_dbg(hcd->dev, "USB RESET function called\n");
+
+	hprt0 = dwc2_read_hprt0(hcd);
+	hprt0 &= ~HPRT0_RST;
+	writel(hprt0, hcd->regs + HPRT0);
+	hcd->flags.b.port_reset_change = 1;
+}
+
+/*
+ * =========================================================================
+ *  Linux HC Driver Functions
+ * =========================================================================
+ */
+
+/*
+ * Initializes the DWC_otg controller and its root hub and prepares it for host
+ * mode operation. Activates the root port. Returns 0 on success and a negative
+ * error code on failure.
+ */
+static int hcd_start(struct usb_hcd *hcd)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+	struct usb_bus *bus;
+
+	dev_dbg(dwc2_hcd->dev, "DWC OTG HCD START\n");
+	bus = hcd_to_bus(hcd);
+
+	hcd->state = HC_STATE_RUNNING;
+	if (dwc2_hcd_startup(dwc2_hcd))
+		return 0;	/* TODO: should a startup failure return an error? */
+
+	/* Initialize and connect root hub if one is not already attached */
+	if (bus->root_hub) {
+		dev_dbg(dwc2_hcd->dev, "DWC OTG HCD Has Root Hub\n");
+		/* Inform the HUB driver to resume */
+		usb_hcd_resume_root_hub(hcd);
+	}
+
+	return 0;
+}
+
+/*
+ * Halts the DWC_otg host mode operations in a clean manner. USB transfers are
+ * stopped.
+ */
+static void hcd_stop(struct usb_hcd *hcd)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+
+	dwc2_hcd_stop(dwc2_hcd);
+}
+
+/* Returns the current frame number */
+static int get_frame_number(struct usb_hcd *hcd)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+
+	return dwc2_hcd_get_frame_number(dwc2_hcd);
+}
+
+#ifdef DEBUG
+static void dump_urb_info(struct usb_hcd *hcd, struct urb *urb, char *fn_name)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+	char *pipetype;
+	char *speed;
+
+	dev_info(dwc2_hcd->dev, "%s, urb %p\n", fn_name, urb);
+	dev_info(dwc2_hcd->dev, "  Device address: %d\n",
+		 usb_pipedevice(urb->pipe));
+	dev_info(dwc2_hcd->dev, "  Endpoint: %d, %s\n",
+		 usb_pipeendpoint(urb->pipe),
+		 usb_pipein(urb->pipe) ? "IN" : "OUT");
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		pipetype = "CONTROL";
+		break;
+	case PIPE_BULK:
+		pipetype = "BULK";
+		break;
+	case PIPE_INTERRUPT:
+		pipetype = "INTERRUPT";
+		break;
+	case PIPE_ISOCHRONOUS:
+		pipetype = "ISOCHRONOUS";
+		break;
+	default:
+		pipetype = "UNKNOWN";
+		break;
+	}
+	dev_info(dwc2_hcd->dev, "  Endpoint type: %s %s (%s)\n", pipetype,
+		 usb_urb_dir_in(urb) ? "IN" : "OUT", usb_pipein(urb->pipe) ?
+		 "IN" : "OUT");
+
+	switch (urb->dev->speed) {
+	case USB_SPEED_HIGH:
+		speed = "HIGH";
+		break;
+	case USB_SPEED_FULL:
+		speed = "FULL";
+		break;
+	case USB_SPEED_LOW:
+		speed = "LOW";
+		break;
+	default:
+		speed = "UNKNOWN";
+		break;
+	}
+	dev_info(dwc2_hcd->dev, "  Speed: %s\n", speed);
+
+	dev_info(dwc2_hcd->dev, "  Max packet size: %d\n",
+		 usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe)));
+	dev_info(dwc2_hcd->dev, "  Data buffer length: %d\n",
+		 urb->transfer_buffer_length);
+	dev_info(dwc2_hcd->dev, "  Transfer buffer: %p, Transfer DMA: %p\n",
+		 urb->transfer_buffer, (void *)urb->transfer_dma);
+	dev_info(dwc2_hcd->dev, "  Setup buffer: %p, Setup DMA: %p\n",
+		 urb->setup_packet, (void *)urb->setup_dma);
+	dev_info(dwc2_hcd->dev, "  Interval: %d\n", urb->interval);
+
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) {
+		int i;
+
+		for (i = 0; i < urb->number_of_packets; i++) {
+			dev_info(dwc2_hcd->dev, "  ISO Desc %d:\n", i);
+			dev_info(dwc2_hcd->dev, "    offset: %d, length %d\n",
+				 urb->iso_frame_desc[i].offset,
+				 urb->iso_frame_desc[i].length);
+		}
+	}
+}
+#endif
+
+/*
+ * Starts processing a USB transfer request specified by a USB Request Block
+ * (URB). mem_flags indicates the type of memory allocation to use while
+ * processing this URB.
+ */
+static int urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+	struct usb_host_endpoint *ep = urb->ep;
+	struct dwc2_hcd_urb *dwc2_urb;
+	int i;
+	int alloc_bandwidth = 0;
+	int retval = 0;
+	u8 ep_type = 0;
+	u32 tflags = 0;
+	void *buf;
+	unsigned long flags;
+
+#ifdef DEBUG
+	dump_urb_info(hcd, urb, "urb_enqueue");
+#endif
+
+	if (ep == NULL)
+		return -EINVAL;
+
+	if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS ||
+	    usb_pipetype(urb->pipe) == PIPE_INTERRUPT) {
+		spin_lock_irqsave(&dwc2_hcd->lock, flags);
+		if (!dwc2_hcd_is_bandwidth_allocated(dwc2_hcd, ep))
+			alloc_bandwidth = 1;
+		spin_unlock_irqrestore(&dwc2_hcd->lock, flags);
+	}
+
+	switch (usb_pipetype(urb->pipe)) {
+	case PIPE_CONTROL:
+		ep_type = USB_ENDPOINT_XFER_CONTROL;
+		break;
+	case PIPE_ISOCHRONOUS:
+		ep_type = USB_ENDPOINT_XFER_ISOC;
+		break;
+	case PIPE_BULK:
+		ep_type = USB_ENDPOINT_XFER_BULK;
+		break;
+	case PIPE_INTERRUPT:
+		ep_type = USB_ENDPOINT_XFER_INT;
+		break;
+	default:
+		dev_warn(dwc2_hcd->dev, "Wrong ep type\n");
+	}
+
+	dwc2_urb = hcd_urb_alloc(dwc2_hcd, urb->number_of_packets, mem_flags);
+	if (!dwc2_urb)
+		return -ENOMEM;
+
+	hcd_urb_set_pipeinfo(dwc2_hcd, dwc2_urb, usb_pipedevice(urb->pipe),
+			     usb_pipeendpoint(urb->pipe), ep_type,
+			     usb_pipein(urb->pipe),
+			     usb_maxpacket(urb->dev, urb->pipe,
+					   !(usb_pipein(urb->pipe))));
+
+	buf = urb->transfer_buffer;
+	if (hcd->self.uses_dma) {
+		/*
+		 * Calculate the virtual address from the physical address,
+		 * because some class drivers may not fill in transfer_buffer.
+		 * In Buffer DMA mode the virtual address is used when
+		 * handling non-DWORD aligned buffers.
+		 */
+		buf = phys_to_virt(urb->transfer_dma);
+	}
+
+	if (!(urb->transfer_flags & URB_NO_INTERRUPT))
+		tflags |= URB_GIVEBACK_ASAP;
+	if (urb->transfer_flags & URB_ZERO_PACKET)
+		tflags |= URB_SEND_ZERO_PACKET;
+
+	hcd_urb_set_params(dwc2_urb, urb, buf, urb->transfer_dma,
+			   urb->transfer_buffer_length, urb->setup_packet,
+			   urb->setup_dma, tflags, urb->interval);
+
+	for (i = 0; i < urb->number_of_packets; ++i)
+		dwc2_hcd_urb_set_iso_desc_params(dwc2_urb, i,
+						 urb->iso_frame_desc[i].offset,
+						 urb->iso_frame_desc[i].length);
+
+	urb->hcpriv = dwc2_urb;
+	retval = dwc2_hcd_urb_enqueue(dwc2_hcd, dwc2_urb, &ep->hcpriv,
+				      mem_flags);
+	if (retval) {
+		urb->hcpriv = NULL;
+		kfree(dwc2_urb);
+	} else {
+		if (alloc_bandwidth) {
+			spin_lock_irqsave(&dwc2_hcd->lock, flags);
+			allocate_bus_bandwidth(hcd,
+					dwc2_hcd_get_ep_bandwidth(dwc2_hcd, ep),
+					urb);
+			spin_unlock_irqrestore(&dwc2_hcd->lock, flags);
+		}
+	}
+
+	return retval;
+}
+
+/*
+ * Aborts/cancels a USB transfer request. Always returns 0 to indicate success.
+ */
+static int urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+	unsigned long flags;
+
+	dev_dbg(dwc2_hcd->dev, "DWC OTG HCD URB Dequeue\n");
+
+#ifdef DEBUG
+	dump_urb_info(hcd, urb, "urb_dequeue");
+#endif
+
+	spin_lock_irqsave(&dwc2_hcd->lock, flags);
+
+	dwc2_hcd_urb_dequeue(dwc2_hcd, urb->hcpriv);
+
+	kfree(urb->hcpriv);
+	urb->hcpriv = NULL;
+
+	/* Higher layer software sets URB status */
+	spin_unlock(&dwc2_hcd->lock);
+	usb_hcd_giveback_urb(hcd, urb, status);
+	spin_lock(&dwc2_hcd->lock);
+
+	dev_info(dwc2_hcd->dev, "Called usb_hcd_giveback_urb()\n");
+	dev_info(dwc2_hcd->dev, "  urb->status = %d\n", urb->status);
+
+	spin_unlock_irqrestore(&dwc2_hcd->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Frees resources in the DWC_otg controller related to a given endpoint. Also
+ * clears state in the HCD related to the endpoint. Any URBs for the endpoint
+ * must already be dequeued.
+ */
+static void endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+
+	dev_dbg(dwc2_hcd->dev,
+		"DWC OTG HCD EP DISABLE: bEndpointAddress=0x%02x, endpoint=%d, ep->hcpriv=%p\n",
+		ep->desc.bEndpointAddress,
+		dwc2_ep_addr_to_endpoint(ep->desc.bEndpointAddress),
+		ep->hcpriv);
+	dwc2_hcd_endpoint_disable(dwc2_hcd, ep, 250);
+}
+
+/*
+ * Resets endpoint-specific parameter values. In the current version this is
+ * used only to reset the data toggle, as a workaround. This function can be
+ * called from the usb_clear_halt routine.
+ */
+static void endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+	int is_control = usb_endpoint_xfer_control(&ep->desc);
+	int is_out = usb_endpoint_dir_out(&ep->desc);
+	int epnum = usb_endpoint_num(&ep->desc);
+	struct usb_device *udev;
+	unsigned long flags;
+
+	dev_dbg(dwc2_hcd->dev, "DWC OTG HCD EP RESET: Endpoint Num=%d\n",
+		epnum);
+
+	udev = to_usb_device(dwc2_hcd->dev);
+
+	spin_lock_irqsave(&dwc2_hcd->lock, flags);
+
+	usb_settoggle(udev, epnum, is_out, 0);
+	if (is_control)
+		usb_settoggle(udev, epnum, !is_out, 0);
+
+	dwc2_hcd_endpoint_reset(dwc2_hcd, ep);
+
+	spin_unlock_irqrestore(&dwc2_hcd->lock, flags);
+}
+
+/*
+ * Handles host mode interrupts for the DWC_otg controller. Returns IRQ_NONE if
+ * there was no interrupt to handle. Returns IRQ_HANDLED if there was a valid
+ * interrupt.
+ *
+ * This function is called by the USB core when an interrupt occurs
+ */
+static irqreturn_t hcd_irq(struct usb_hcd *hcd)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+	int retval = dwc2_hcd_handle_intr(dwc2_hcd);
+
+	return IRQ_RETVAL(retval);
+}
+
+/*
+ * Creates Status Change bitmap for the root hub and root port. The bitmap is
+ * returned in buf. Bit 0 is the status change indicator for the root hub. Bit 1
+ * is the status change indicator for the single root port. Returns 1 if either
+ * change indicator is 1, otherwise returns 0.
+ */
+static int hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+	struct dwc2_hcd *dwc2_hcd = hcd_to_dwc2_hcd(hcd);
+
+	buf[0] = dwc2_hcd_is_status_changed(dwc2_hcd, 1) << 1;
+	return buf[0] != 0;
+}
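+/*
+ * For example, if any port status change is pending, buf[0] becomes 0x02
+ * (bit 1 = the single root port) and the function returns 1; otherwise
+ * buf[0] is 0 and the function returns 0.
+ */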
+
+/* Handles hub class-specific requests */
+static int hub_control(struct usb_hcd *hcd, u16 typereq, u16 wvalue,
+		       u16 windex, char *buf, u16 wlength)
+{
+	int retval = hcd_hub_control(hcd_to_dwc2_hcd(hcd), typereq, wvalue,
+				     windex, buf, wlength);
+	return retval;
+}
+
+static struct hc_driver dwc2_hc_driver = {
+	.description = "dwc2_hcd",
+	.product_desc = "DWC OTG Controller",
+	.hcd_priv_size = sizeof(struct wrapper_priv_data),
+
+	.irq = hcd_irq,
+	.flags = HCD_MEMORY | HCD_USB2,
+
+	.start = hcd_start,
+	.stop = hcd_stop,
+	.urb_enqueue = urb_enqueue,
+	.urb_dequeue = urb_dequeue,
+	.endpoint_disable = endpoint_disable,
+	.endpoint_reset = endpoint_reset,
+	.get_frame_number = get_frame_number,
+
+	.hub_status_data = hub_status_data,
+	.hub_control = hub_control,
+};
+
+/*
+ * Frees secondary storage associated with the dwc2_hcd structure that is
+ * pointed to from the struct usb_hcd private data
+ */
+static void hcd_free(struct dwc2_hcd *hcd)
+{
+	u32 ahbcfg;
+	u32 dctl;
+	int i;
+
+	dev_dbg(hcd->dev, "DWC OTG HCD FREE\n");
+
+	del_timers(hcd);
+
+	/* Free memory for QH/QTD lists */
+	qh_list_free(hcd, &hcd->non_periodic_sched_inactive);
+	qh_list_free(hcd, &hcd->non_periodic_sched_active);
+	qh_list_free(hcd, &hcd->periodic_sched_inactive);
+	qh_list_free(hcd, &hcd->periodic_sched_ready);
+	qh_list_free(hcd, &hcd->periodic_sched_assigned);
+	qh_list_free(hcd, &hcd->periodic_sched_queued);
+
+	/* Free memory for the host channels */
+	for (i = 0; i < MAX_EPS_CHANNELS; i++) {
+		struct dwc2_hc *hc = hcd->hc_ptr_array[i];
+
+		if (hc != NULL) {
+			dev_dbg(hcd->dev, "HCD Free channel #%i, hc=%p\n",
+				i, hc);
+			hcd->hc_ptr_array[i] = NULL;
+			kfree(hc);
+		}
+	}
+
+	if (hcd->dma_enable) {
+		if (hcd->status_buf) {
+			dma_free_coherent(hcd->dev, DWC2_HCD_STATUS_BUF_SIZE,
+					  hcd->status_buf, hcd->status_buf_dma);
+			hcd->status_buf = NULL;
+		}
+	} else {
+		kfree(hcd->status_buf);
+		hcd->status_buf = NULL;
+	}
+
+	del_timer(&hcd->conn_timer);
+
+	ahbcfg = readl(hcd->regs + GAHBCFG);
+
+	/* Disable all interrupts */
+	ahbcfg &= ~GAHBCFG_GlblIntrEn;
+	writel(ahbcfg, hcd->regs + GAHBCFG);
+	writel(0, hcd->regs + GINTMSK);
+
+	if (hcd->snpsid >= DWC2_CORE_REV_3_00a) {
+		dctl = readl(hcd->regs + DCTL);
+		dctl |= DCTL_SftDiscon;
+		writel(dctl, hcd->regs + DCTL);
+	}
+
+	if (hcd->wq_otg) {
+		if (!cancel_work_sync(&hcd->wf_otg))
+			flush_workqueue(hcd->wq_otg);
+		destroy_workqueue(hcd->wq_otg);
+	}
+
+	kfree(hcd->core_params);
+	hcd->core_params = NULL;
+	del_timer(&hcd->wkp_timer);
+}
+
+static void hcd_release(struct dwc2_hcd *hcd)
+{
+	/* Turn off all host-specific interrupts */
+	dwc2_disable_host_interrupts(hcd);
+
+	hcd_free(hcd);
+}
+
+static void set_uninitialized(int *p, int size)
+{
+	int i;
+
+	for (i = 0; i < size; i++)
+		p[i] = -1;
+}
+
+/*
+ * Initializes the HCD. This function allocates memory for and initializes the
+ * static parts of the usb_hcd and dwc2_hcd structures. It also registers the
+ * USB bus with the core and calls the hc_driver->start() function. It returns
+ * a negative error on failure.
+ */
+int dwc2_hcd_init(struct device *dev, struct dwc2_device *otg_dev, int irq)
+{
+	struct usb_hcd *usb_hcd;
+	struct dwc2_hcd *hcd;
+	struct dwc2_hc *channel;
+	u32 gusbcfg;
+	int i, num_channels;
+	int retval = -ENOMEM;
+
+	dev_dbg(dev, "DWC OTG HCD INIT\n");
+
+	/*
+	 * Allocate memory for the base HCD plus the DWC OTG HCD.
+	 * Initialize the base HCD.
+	 */
+	usb_hcd = usb_create_hcd(&dwc2_hc_driver, dev, dev_name(dev));
+	if (!usb_hcd)
+		goto error1;
+
+	usb_hcd->has_tt = 1;
+
+	/* Initialize the DWC OTG HCD */
+	hcd = kzalloc(sizeof(*hcd), GFP_KERNEL);
+	if (!hcd)
+		goto error2;
+
+	((struct wrapper_priv_data *) &usb_hcd->hcd_priv)->dwc2_hcd = hcd;
+	hcd->dev = dev;
+	hcd->otg_dev = otg_dev;
+	hcd->regs = otg_dev->base;
+	dwc2_hcd_set_priv_data(hcd, usb_hcd);
+
+	/*
+	 * Store the contents of the hardware configuration registers here for
+	 * easy access later
+	 */
+	hcd->hwcfg1 = readl(hcd->regs + GHWCFG1);
+	hcd->hwcfg2 = readl(hcd->regs + GHWCFG2);
+	hcd->hwcfg3 = readl(hcd->regs + GHWCFG3);
+	hcd->hwcfg4 = readl(hcd->regs + GHWCFG4);
+
+	dev_dbg(hcd->dev, "hwcfg1=%08x\n", hcd->hwcfg1);
+	dev_dbg(hcd->dev, "hwcfg2=%08x\n", hcd->hwcfg2);
+	dev_dbg(hcd->dev, "hwcfg3=%08x\n", hcd->hwcfg3);
+	dev_dbg(hcd->dev, "hwcfg4=%08x\n", hcd->hwcfg4);
+
+	/* Force host mode to read the exact power-on value of HPTXFSIZ */
+	gusbcfg = readl(hcd->regs + GUSBCFG);
+	gusbcfg |= GUSBCFG_ForceHostMode;
+	writel(gusbcfg, hcd->regs + GUSBCFG);
+	msleep(100);
+
+	hcd->hptxfsiz = readl(hcd->regs + HPTXFSIZ);
+	dev_dbg(hcd->dev, "hptxfsiz=%08x\n", hcd->hptxfsiz);
+	gusbcfg = readl(hcd->regs + GUSBCFG);
+	gusbcfg &= ~GUSBCFG_ForceHostMode;
+	writel(gusbcfg, hcd->regs + GUSBCFG);
+	msleep(100);
+
+	hcd->hcfg = readl(hcd->regs + HCFG);
+	dev_dbg(hcd->dev, "hcfg=%08x\n", hcd->hcfg);
+	dev_dbg(hcd->dev, "op_mode=%0x\n",
+		hcd->hwcfg2 >> GHWCFG2_OP_MODE_SHIFT &
+		GHWCFG2_OP_MODE_MASK >> GHWCFG2_OP_MODE_SHIFT);
+	dev_dbg(hcd->dev, "arch=%0x\n",
+		hcd->hwcfg2 >> GHWCFG2_ARCHITECTURE_SHIFT &
+		GHWCFG2_ARCHITECTURE_MASK >> GHWCFG2_ARCHITECTURE_SHIFT);
+	dev_dbg(hcd->dev, "num_dev_ep=%d\n",
+		hcd->hwcfg2 >> GHWCFG2_NUM_DEV_EP_SHIFT &
+		GHWCFG2_NUM_DEV_EP_MASK >> GHWCFG2_NUM_DEV_EP_SHIFT);
+	dev_dbg(hcd->dev, "max_host_chan=%d\n",
+		hcd->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT &
+		GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT);
+	dev_dbg(hcd->dev, "nonperio_tx_q_depth=0x%0x\n",
+		hcd->hwcfg2 >> GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT &
+		GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK >>
+				GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT);
+	dev_dbg(hcd->dev, "host_perio_tx_q_depth=0x%0x\n",
+		hcd->hwcfg2 >> GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT &
+		GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK >>
+				GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT);
+	dev_dbg(hcd->dev, "dev_token_q_depth=0x%0x\n",
+		hcd->hwcfg2 >> GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT &
+		GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK >>
+				GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT);
+
+	hcd->core_params = kzalloc(sizeof(*hcd->core_params), GFP_KERNEL);
+	if (!hcd->core_params)
+		goto error3;
+
+	set_uninitialized((int *)hcd->core_params,
+			  sizeof(*hcd->core_params) / sizeof(int));
+
+	/* Validate parameter values */
+	if (dwc2_set_parameters(hcd)) {
+		retval = -EINVAL;
+		goto error4;
+	}
+
+	/* Initialize the DWC_otg core */
+	dwc2_core_init(hcd);
+
+	spin_lock_init(&hcd->lock);
+
+	/*
+	 * Disable the global interrupt until all the interrupt handlers are
+	 * installed
+	 */
+	dwc2_disable_global_interrupts(hcd);
+
+	/* Create new workqueue and init work */
+	hcd->wq_otg = create_singlethread_workqueue("dwc_otg");
+	if (!hcd->wq_otg) {
+		dev_err(hcd->dev, "Failed to create workqueue\n");
+		goto error4;
+	}
+	INIT_WORK(&hcd->wf_otg, w_conn_id_status_change);
+
+	hcd->snpsid = readl(hcd->regs + GSNPSID);
+	dev_info(hcd->dev, "Core Release: %1x.%1x%1x%1x\n",
+		 hcd->snpsid >> 12 & 0xf, hcd->snpsid >> 8 & 0xf,
+		 hcd->snpsid >> 4 & 0xf, hcd->snpsid & 0xf);
+
+	setup_timer(&hcd->wkp_timer, w_wakeup_detected, (unsigned long)hcd);
+
+	/* Initialize the non-periodic schedule */
+	INIT_LIST_HEAD(&hcd->non_periodic_sched_inactive);
+	INIT_LIST_HEAD(&hcd->non_periodic_sched_active);
+
+	/* Initialize the periodic schedule */
+	INIT_LIST_HEAD(&hcd->periodic_sched_inactive);
+	INIT_LIST_HEAD(&hcd->periodic_sched_ready);
+	INIT_LIST_HEAD(&hcd->periodic_sched_assigned);
+	INIT_LIST_HEAD(&hcd->periodic_sched_queued);
+
+	/*
+	 * Create a host channel descriptor for each host channel implemented
+	 * in the controller. Initialize the channel descriptor array.
+	 */
+	INIT_LIST_HEAD(&hcd->free_hc_list);
+	num_channels = hcd->core_params->host_channels;
+	memset(&hcd->hc_ptr_array[0], 0, sizeof(hcd->hc_ptr_array));
+
+	for (i = 0; i < num_channels; i++) {
+		channel = kzalloc(sizeof(*channel), GFP_KERNEL);
+		if (channel == NULL) {
+			dev_err(hcd->dev,
+				"%s: host channel allocation failed\n",
+				__func__);
+			goto error5;
+		}
+		channel->hc_num = i;
+		hcd->hc_ptr_array[i] = channel;
+	}
+
+	/* Initialize the Connection timeout timer */
+	setup_timer(&hcd->conn_timer, hcd_connect_timeout, (unsigned long)hcd);
+
+	/* Initialize hcd start work */
+	INIT_DELAYED_WORK(&hcd->start_work, hcd_start_func);
+
+	/* Initialize port reset work */
+	INIT_DELAYED_WORK(&hcd->reset_work, hcd_reset_func);
+
+	/*
+	 * Allocate space for storing data on status transactions. Normally
+	 * no data is sent, but this space acts as a bit bucket.
+	 */
+	if (hcd->dma_enable)
+		hcd->status_buf = dma_alloc_coherent(hcd->dev,
+					DWC2_HCD_STATUS_BUF_SIZE,
+					&hcd->status_buf_dma, GFP_KERNEL);
+	else
+		hcd->status_buf = kzalloc(DWC2_HCD_STATUS_BUF_SIZE,
+					  GFP_KERNEL);
+
+	if (!hcd->status_buf) {
+		dev_err(hcd->dev, "%s: status_buf allocation failed\n",
+			__func__);
+		goto error5;
+	}
+
+	hcd->otg_port = 1;
+	hcd->frame_list = NULL;
+	hcd->frame_list_dma = 0;
+	hcd->periodic_qh_count = 0;
+
+	/* Initialize lx_state to the L3 disconnected state */
+	hcd->lx_state = DWC2_L3;
+
+	usb_hcd->self.otg_port = dwc2_hcd_otg_port(hcd);
+	/*usb_hcd->self.otg_version = dwc2_get_otg_version(otg_dev->core_if);*/
+
+	/* Don't support SG list at this point */
+	usb_hcd->self.sg_tablesize = 0;
+
+	/*
+	 * Finish generic HCD initialization and start the HCD. This function
+	 * allocates the DMA buffer pool, registers the USB bus, requests the
+	 * IRQ line, and calls hcd_start method.
+	 */
+	retval = usb_add_hcd(usb_hcd, irq, IRQF_SHARED | IRQF_DISABLED);
+	if (retval < 0)
+		goto error5;
+
+	otg_dev->hcd = hcd;
+	return 0;
+
+error5:
+	hcd_release(hcd);
+error4:
+	kfree(hcd->core_params);
+error3:
+	kfree(hcd);
+error2:
+	usb_put_hcd(usb_hcd);
+error1:
+	dev_err(dev, "%s() FAILED, returning %d\n", __func__, retval);
+	return retval;
+}
+
+/*
+ * Removes the HCD.
+ * Frees memory and resources associated with the HCD and deregisters the bus.
+ */
+void dwc2_hcd_remove(struct device *dev, struct dwc2_device *otg_dev)
+{
+	struct dwc2_hcd *dwc2_hcd;
+	struct usb_hcd *hcd;
+
+	dev_dbg(dev, "DWC OTG HCD REMOVE\n");
+
+	if (!otg_dev) {
+		dev_dbg(dev, "%s: otg_dev NULL!\n", __func__);
+		return;
+	}
+
+	dwc2_hcd = otg_dev->hcd;
+	dev_dbg(dev, "otg_dev->hcd = %p\n", dwc2_hcd);
+
+	if (!dwc2_hcd) {
+		dev_dbg(dev, "%s: otg_dev->hcd NULL!\n", __func__);
+		return;
+	}
+
+	hcd = dwc2_hcd_to_hcd(dwc2_hcd);
+	dev_dbg(dev, "otg_dev->hcd->priv = %p\n", hcd);
+
+	if (!hcd) {
+		dev_dbg(dev, "%s: dwc2_hcd_to_hcd(dwc2_hcd) NULL!\n", __func__);
+		return;
+	}
+
+	usb_remove_hcd(hcd);
+	dwc2_hcd_set_priv_data(dwc2_hcd, NULL);
+	hcd_release(dwc2_hcd);
+	kfree(dwc2_hcd->core_params);
+	kfree(dwc2_hcd);
+	usb_put_hcd(hcd);
+}
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
new file mode 100644
index 0000000..2df5f71
--- /dev/null
+++ b/drivers/usb/dwc2/hcd.h
@@ -0,0 +1,1134 @@
+/*
+ * hcd.h - DesignWare HS OTG Controller host-mode declarations
+ *
+ * Copyright (C) 2004-2012 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __DWC_HCD_H__
+#define __DWC_HCD_H__
+
+/*
+ * This file contains the structures, constants, and interfaces for the
+ * Host Controller Driver (HCD)
+ *
+ * The Host Controller Driver (HCD) is responsible for translating requests
+ * from the USB Driver into the appropriate actions on the DWC_otg controller.
+ * It isolates the USBD from the specifics of the controller by providing an
+ * API to the USBD.
+ */
+
+struct dwc2_hcd_pipe_info {
+	u8 dev_addr;
+	u8 ep_num;
+	u8 pipe_type;
+	u8 pipe_dir;
+	u16 mps;
+};
+
+struct dwc2_hcd_iso_packet_desc {
+	u32 offset;
+	u32 length;
+	u32 actual_length;
+	u32 status;
+};
+
+struct dwc2_qtd;
+
+struct dwc2_hcd_urb {
+	void *priv;
+	struct dwc2_qtd *qtd;
+	void *buf;
+	dma_addr_t dma;
+	void *setup_packet;
+	dma_addr_t setup_dma;
+	u32 length;
+	u32 actual_length;
+	u32 status;
+	u32 error_count;
+	u32 packet_count;
+	u32 flags;
+	u16 interval;
+	struct dwc2_hcd_pipe_info pipe_info;
+	struct dwc2_hcd_iso_packet_desc iso_descs[0];
+};
+
+/* Phases for control transfers */
+enum dwc2_control_phase {
+	DWC2_CONTROL_SETUP,
+	DWC2_CONTROL_DATA,
+	DWC2_CONTROL_STATUS
+};
+
+/* Transaction types */
+enum dwc2_transaction_type {
+	DWC2_TRANSACTION_NONE,
+	DWC2_TRANSACTION_PERIODIC,
+	DWC2_TRANSACTION_NON_PERIODIC,
+	DWC2_TRANSACTION_ALL
+};
+
+struct dwc2_qh;
+
+/*
+ * A Queue Transfer Descriptor (QTD) holds the state of a bulk, control,
+ * interrupt, or isochronous transfer. A single QTD is created for each URB
+ * (of one of these types) submitted to the HCD. The transfer associated with
+ * a QTD may require one or multiple transactions.
+ *
+ * A QTD is linked to a Queue Head, which is entered in either the
+ * non-periodic or periodic schedule for execution. When a QTD is chosen for
+ * execution, some or all of its transactions may be executed. After
+ * execution, the state of the QTD is updated. The QTD may be retired if all
+ * its transactions are complete or if an error occurred. Otherwise, it
+ * remains in the schedule so more transactions can be executed later.
+ */
+struct dwc2_qtd {
+	/* Current phase for control transfers (Setup, Data, or Status) */
+	enum dwc2_control_phase control_phase;
+
+	/* Indicates if this QTD is currently being processed by hardware */
+	u8 in_process;
+
+	/*
+	 * Determines the PID of the next data packet for the data phase of
+	 * control transfers. Ignored for other transfer types.
+	 * One of the following values:
+	 *	- DWC2_HC_PID_DATA0
+	 *	- DWC2_HC_PID_DATA1
+	 */
+	u8 data_toggle;
+
+	/*
+	 * Keeps track of the current split type for FS/LS endpoints on an
+	 * HS hub
+	 */
+	u8 complete_split;
+
+	/* Position of the ISOC split in full/low speed */
+	u8 isoc_split_pos;
+
+	/*
+	 * Index of the next frame descriptor for an isochronous transfer. A
+	 * frame descriptor describes the buffer position and length of the
+	 * data to be transferred in the next scheduled (micro)frame of an
+	 * isochronous transfer. It also holds status for that transaction.
+	 * The frame index starts at 0.
+	 */
+	u16 isoc_frame_index;
+
+	/* Position of the ISOC split in the buffer for the current frame */
+	u16 isoc_split_offset;
+
+	/* How many bytes transferred during SSPLIT OUT */
+	u32 ssplit_out_xfer_count;
+
+	/*
+	 * Holds the number of bus errors that have occurred for a transaction
+	 * within this transfer
+	 */
+	u8 error_count;
+
+	/* Number of DMA descriptors for this QTD */
+	u8 n_desc;
+
+	/*
+	 * Last activated frame (packet) index, used in Descriptor DMA mode only
+	 */
+	u16 isoc_frame_index_last;
+
+	/* URB for this transfer */
+	struct dwc2_hcd_urb *urb;
+
+	struct dwc2_qh *qh;
+
+	/* Entry in the QH's list of QTDs */
+	struct list_head qtd_list_entry;
+};
+
+/*
+ * A Queue Head (QH) holds the static characteristics of an endpoint and
+ * maintains a list of transfers (QTDs) for that endpoint. A QH structure may
+ * be entered in either the non-periodic or periodic schedule.
+ */
+struct dwc2_qh {
+	/*
+	 * Endpoint type. One of the following values:
+	 *	- USB_ENDPOINT_XFER_CONTROL
+	 *	- USB_ENDPOINT_XFER_BULK
+	 *	- USB_ENDPOINT_XFER_INT
+	 *	- USB_ENDPOINT_XFER_ISOC
+	 */
+	u8 ep_type;
+	u8 ep_is_in;
+
+	/* wMaxPacketSize Field of Endpoint Descriptor */
+	u16 maxp;
+
+	/*
+	 * Device speed. One of the following values:
+	 *	- DWC2_EP_SPEED_LOW
+	 *	- DWC2_EP_SPEED_FULL
+	 *	- DWC2_EP_SPEED_HIGH
+	 */
+	u8 dev_speed;
+
+	/*
+	 * Determines the PID of the next data packet for non-control
+	 * transfers. Ignored for control transfers.
+	 * One of the following values:
+	 *	- DWC2_HC_PID_DATA0
+	 *	- DWC2_HC_PID_DATA1
+	 */
+	u8 data_toggle;
+
+	/* Set to 1 if in the ping state */
+	u8 ping_state;
+
+	/* Full/low speed endpoint on high-speed hub requires split */
+	u8 do_split;
+
+	/* List of QTDs for this QH */
+	struct list_head qtd_list;
+
+	/* Host channel currently processing transfers for this QH */
+	struct dwc2_hc *channel;
+
+	/* Bandwidth in microseconds per (micro)frame */
+	u16 usecs;
+
+	/* Interval between transfers in (micro)frames */
+	u16 interval;
+
+	/*
+	 * (micro)frame to initialize a periodic transfer. The transfer
+	 * executes in the following (micro)frame.
+	 */
+	u16 sched_frame;
+
+	/* (micro)frame at which last start split was initialized */
+	u16 start_split_frame;
+
+	/*
+	 * Used instead of the original buffer if its physical address is
+	 * not dword-aligned
+	 */
+	u8 *dw_align_buf;
+	dma_addr_t dw_align_buf_dma;
+
+	/* Entry for QH in either the periodic or non-periodic schedule */
+	struct list_head qh_list_entry;
+
+	/* Descriptor List */
+	struct dwc2_host_dma_desc *desc_list;
+
+	/* Descriptor List physical address */
+	dma_addr_t desc_list_dma;
+
+	/*
+	 * Xfer Bytes array. Each element corresponds to a descriptor and
+	 * indicates the original XferSize value for that descriptor.
+	 */
+	u32 *n_bytes;
+
+	/* Actual number of transfer descriptors in a list */
+	u16 ntd;
+
+	/* First activated isochronous transfer descriptor index */
+	u8 td_first;
+
+	/* Last activated isochronous transfer descriptor index */
+	u8 td_last;
+};
+
+struct dwc2_hcd;
+
+#ifdef DEBUG
+struct hc_xfer_info {
+	struct dwc2_hcd *hcd;
+	struct dwc2_hc *hc;
+};
+#endif
+
+/*
+ * This structure is a wrapper that encapsulates the driver components used
+ * to manage a single DWC_otg controller
+ */
+struct dwc2_device {
+	/* Base address returned from ioremap() */
+	void __iomem *base;
+
+	/* Start address of a PCI region */
+	resource_size_t rsrc_start;
+
+	/* Length of a PCI region */
+	resource_size_t rsrc_len;
+
+	/* Pointer to the HCD structure  */
+	struct dwc2_hcd *hcd;
+
+	/* Flag to indicate whether the common IRQ handler is installed  */
+	u8 common_irq_installed;
+};
+
+/*
+ * This structure holds the state of the HCD, including the non-periodic and
+ * periodic schedules
+ */
+struct dwc2_hcd {
+	/* The DWC otg device pointer */
+	struct dwc2_device *otg_dev;
+
+	struct device *dev;
+
+	void __iomem *regs;
+
+	/* Parameters that define how the core should be configured */
+	struct dwc2_core_params *core_params;
+
+	/* Hardware Configuration -- stored here for convenience */
+	u32 hwcfg1;
+	u32 hwcfg2;
+	u32 hwcfg3;
+	u32 hwcfg4;
+	u32 hptxfsiz;
+
+	/* Host Configuration -- stored here for convenience */
+	u32 hcfg;
+
+	/* Value from SNPSID register */
+	u32 snpsid;
+
+	/* Total RAM for FIFOs (Bytes) */
+	u16 total_fifo_size;
+
+	/* Size of Rx FIFO (Bytes) */
+	u16 rx_fifo_size;
+
+	/* Size of Non-periodic Tx FIFO (Bytes) */
+	u16 nperio_tx_fifo_size;
+
+	/*
+	 * The operational state. During transitions (a_host -> a_peripheral
+	 * and b_device -> b_host) this may not match the core, but allows
+	 * the software to determine the transitions.
+	 */
+	u8 op_state;
+
+	/* A-Device is a_host */
+#define A_HOST		1
+	/* A-Device is a_suspend */
+#define A_SUSPEND	2
+	/* A-Device is a_peripheral */
+#define A_PERIPHERAL	3
+	/* B-Device is operating as a Peripheral */
+#define B_PERIPHERAL	4
+	/* B-Device is operating as a Host */
+#define B_HOST		5
+
+	/* OTG revision supported */
+	u32 otg_ver;
+
+	/*
+	 * Set to 1 if the core PHY interface bits in USBCFG have been
+	 * initialized
+	 */
+	unsigned int phy_init_done:1;
+
+	/*
+	 * Set to 1 if multiple packets of a high-bandwidth transfer are in
+	 * process of being queued
+	 */
+	unsigned int queuing_high_bandwidth:1;
+
+	unsigned int dma_enable:1;
+	unsigned int dma_desc_enable:1;
+	unsigned int en_multiple_tx_fifo:1;
+	unsigned int srp_success:1;
+
+	/* Workqueue object used for handling several interrupts */
+	struct workqueue_struct *wq_otg;
+
+	/* Work object for handling connection id status change interrupt */
+	struct work_struct wf_otg;
+
+	/* Timer object for handling "Wakeup Detected" interrupt */
+	struct timer_list wkp_timer;
+
+	/* Lx state of connected device */
+	enum dwc2_lx_state lx_state;
+
+	/* Internal DWC HCD Flags */
+	union dwc2_hcd_internal_flags {
+		u32 d32;
+		struct {
+			unsigned port_connect_status_change:1;
+			unsigned port_connect_status:1;
+			unsigned port_reset_change:1;
+			unsigned port_enable_change:1;
+			unsigned port_suspend_change:1;
+			unsigned port_over_current_change:1;
+			unsigned port_l1_change:1;
+			unsigned reserved:26;
+		} b;
+	} flags;
+
+	/*
+	 * Inactive items in the non-periodic schedule. This is a list of
+	 * Queue Heads. Transfers associated with these Queue Heads are not
+	 * currently assigned to a host channel.
+	 */
+	struct list_head non_periodic_sched_inactive;
+
+	/*
+	 * Active items in the non-periodic schedule. This is a list of
+	 * Queue Heads. Transfers associated with these Queue Heads are
+	 * currently assigned to a host channel.
+	 */
+	struct list_head non_periodic_sched_active;
+
+	/*
+	 * Pointer to the next Queue Head to process in the active
+	 * non-periodic schedule
+	 */
+	struct list_head *non_periodic_qh_ptr;
+
+	/*
+	 * Inactive items in the periodic schedule. This is a list of QHs for
+	 * periodic transfers that are _not_ scheduled for the next frame.
+	 * Each QH in the list has an interval counter that determines when it
+	 * needs to be scheduled for execution. This scheduling mechanism
+	 * allows only a simple calculation for periodic bandwidth used (i.e.
+	 * must assume that all periodic transfers may need to execute in the
+	 * same frame). However, it greatly simplifies scheduling and should
+	 * be sufficient for the vast majority of OTG hosts, which need to
+	 * connect to a small number of peripherals at one time.
+	 *
+	 * Items move from this list to periodic_sched_ready when the QH
+	 * interval counter is 0 at SOF.
+	 */
+	struct list_head periodic_sched_inactive;
+
+	/*
+	 * List of periodic QHs that are ready for execution in the next
+	 * frame, but have not yet been assigned to host channels.
+	 *
+	 * Items move from this list to periodic_sched_assigned as host
+	 * channels become available during the current frame.
+	 */
+	struct list_head periodic_sched_ready;
+
+	/*
+	 * List of periodic QHs to be executed in the next frame that are
+	 * assigned to host channels.
+	 *
+	 * Items move from this list to periodic_sched_queued as the
+	 * transactions for the QH are queued to the DWC_otg controller.
+	 */
+	struct list_head periodic_sched_assigned;
+
+	/*
+	 * List of periodic QHs that have been queued for execution.
+	 *
+	 * Items move from this list to either periodic_sched_inactive or
+	 * periodic_sched_ready when the channel associated with the transfer
+	 * is released. If the interval for the QH is 1, the item moves to
+	 * periodic_sched_ready because it must be rescheduled for the next
+	 * frame. Otherwise, the item moves to periodic_sched_inactive.
+	 */
+	struct list_head periodic_sched_queued;
+
+	/*
+	 * Total bandwidth claimed so far for periodic transfers. This value
+	 * is in microseconds per (micro)frame. The assumption is that all
+	 * periodic transfers may occur in the same (micro)frame.
+	 */
+	u16 periodic_usecs;
+
+	/*
+	 * Frame number read from the core at SOF. The value ranges from 0 to
+	 * HFNUM_MAX_FRNUM.
+	 */
+	u16 frame_number;
+
+	/*
+	 * Count of periodic QHs. Used to enable/disable the SOF interrupt
+	 * when several periodic endpoints are in use.
+	 */
+	u16 periodic_qh_count;
+
+	/*
+	 * Free host channels in the controller. This is a list of
+	 * struct dwc2_hc items.
+	 */
+	struct list_head free_hc_list;
+
+	/*
+	 * Number of host channels assigned to periodic transfers. Currently
+	 * assuming that there is a dedicated host channel for each periodic
+	 * transaction and at least one host channel available for
+	 * non-periodic transactions.
+	 */
+	int periodic_channels;
+
+	/* Number of host channels assigned to non-periodic transfers */
+	int non_periodic_channels;
+
+	/*
+	 * Array of pointers to the host channel descriptors. Allows accessing
+	 * a host channel descriptor given the host channel number. This is
+	 * useful in interrupt handlers.
+	 */
+	struct dwc2_hc *hc_ptr_array[MAX_EPS_CHANNELS];
+
+	/*
+	 * Buffer to use for any data received during the status phase of a
+	 * control transfer. Normally no data is transferred during the status
+	 * phase. This buffer is used as a bit bucket.
+	 */
+	u8 *status_buf;
+
+	/* DMA address for status_buf */
+	dma_addr_t status_buf_dma;
+#define DWC2_HCD_STATUS_BUF_SIZE 64
+
+	/*
+	 * Connection timer. An OTG host must display a message if the device
+	 * does not connect. Started when the VBus power is turned on via
+	 * sysfs attribute "buspower".
+	 */
+	struct timer_list conn_timer;
+
+	/* Delayed work for handling host A-cable connection */
+	struct delayed_work start_work;
+
+	/* Delayed work to do a port reset */
+	struct delayed_work reset_work;
+
+	spinlock_t lock;
+
+	/* Private data that could be used by OS wrapper */
+	void *priv;
+
+	u8 otg_port;
+
+	/* Frame List */
+	u32 *frame_list;
+
+	/* Frame List DMA address */
+	dma_addr_t frame_list_dma;
+
+#ifdef DEBUG
+	u32 frrem_samples;
+	u64 frrem_accum;
+
+	u32 hfnum_7_samples_a;
+	u64 hfnum_7_frrem_accum_a;
+	u32 hfnum_0_samples_a;
+	u64 hfnum_0_frrem_accum_a;
+	u32 hfnum_other_samples_a;
+	u64 hfnum_other_frrem_accum_a;
+
+	u32 hfnum_7_samples_b;
+	u64 hfnum_7_frrem_accum_b;
+	u32 hfnum_0_samples_b;
+	u64 hfnum_0_frrem_accum_b;
+	u32 hfnum_other_samples_b;
+	u64 hfnum_other_frrem_accum_b;
+#endif
+};
+
+/*
+ * Macro used to clear one channel interrupt. The HCINT bits are
+ * write-1-to-clear, so writing an interrupt bit back acknowledges it.
+ */
+#define clear_hc_int(_hcd_, _chnum_, _intr_)	\
+	writel((_intr_), (_hcd_)->regs + HCINT(_chnum_))
+
+/*
+ * Macro used to disable one channel interrupt. Channel interrupts are
+ * disabled when the channel is halted or released by the interrupt handler.
+ * There is no need to handle further interrupts of that type until the
+ * channel is re-assigned. In fact, subsequent handling may cause crashes
+ * because the channel structures are cleaned up when the channel is released.
+ */
+#define disable_hc_int(_hcd_, _chnum_, _intr_) \
+do { \
+	u32 _hcintmsk_; \
+	_hcintmsk_ = readl((_hcd_)->regs + HCINTMSK(_chnum_)); \
+	_hcintmsk_ &= ~(_intr_); \
+	writel(_hcintmsk_, (_hcd_)->regs + HCINTMSK(_chnum_)); \
+} while (0)
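+/*
+ * Typical call site (the mask name is illustrative, not a definition from
+ * this driver): after servicing a channel-halted condition, an interrupt
+ * handler can do
+ *
+ *	disable_hc_int(hcd, chnum, HCINTMSK_CHHLTD);
+ *
+ * to suppress further interrupts of that type until the channel is
+ * re-assigned.
+ */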
+
+/*
+ * Returns the mode of operation, host or device
+ */
+static inline int dwc2_is_host_mode(struct dwc2_hcd *hcd)
+{
+	return (readl(hcd->regs + GINTSTS) & GINTSTS_CurMode_Host) != 0;
+}
+
+static inline int dwc2_is_device_mode(struct dwc2_hcd *hcd)
+{
+	return (readl(hcd->regs + GINTSTS) & GINTSTS_CurMode_Host) == 0;
+}
+
+/*
+ * Reads HPRT0 in preparation to modify it. It keeps the write-clear (WC)
+ * bits at 0 so that if they are read as 1, writing the value back won't
+ * inadvertently clear them.
+ */
+static inline u32 dwc2_read_hprt0(struct dwc2_hcd *hcd)
+{
+	u32 hprt0;
+
+	hprt0 = readl(hcd->regs + HPRT0);
+	hprt0 &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG | HPRT0_OVRCURRCHG);
+
+	return hprt0;
+}
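+/*
+ * Typical usage, mirroring hcd_reset_func() in hcd.c, is a read-modify-write
+ * that cannot accidentally ack a pending write-clear status bit:
+ *
+ *	hprt0 = dwc2_read_hprt0(hcd);
+ *	hprt0 |= HPRT0_RST;
+ *	writel(hprt0, hcd->regs + HPRT0);
+ */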
+
+static inline u8 dwc2_hcd_get_ep_num(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->ep_num;
+}
+
+static inline u8 dwc2_hcd_get_pipe_type(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->pipe_type;
+}
+
+static inline u16 dwc2_hcd_get_mps(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->mps;
+}
+
+static inline u8 dwc2_hcd_get_dev_addr(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->dev_addr;
+}
+
+static inline u8 dwc2_hcd_is_pipe_isoc(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->pipe_type == USB_ENDPOINT_XFER_ISOC;
+}
+
+static inline u8 dwc2_hcd_is_pipe_int(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->pipe_type == USB_ENDPOINT_XFER_INT;
+}
+
+static inline u8 dwc2_hcd_is_pipe_bulk(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->pipe_type == USB_ENDPOINT_XFER_BULK;
+}
+
+static inline u8 dwc2_hcd_is_pipe_control(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->pipe_type == USB_ENDPOINT_XFER_CONTROL;
+}
+
+static inline u8 dwc2_hcd_is_pipe_in(struct dwc2_hcd_pipe_info *pipe)
+{
+	return pipe->pipe_dir == USB_DIR_IN;
+}
+
+static inline u8 dwc2_hcd_is_pipe_out(struct dwc2_hcd_pipe_info *pipe)
+{
+	return !dwc2_hcd_is_pipe_in(pipe);
+}
+
+extern int dwc2_hcd_init(struct device *dev, struct dwc2_device *otg_dev,
+			 int irq);
+extern void dwc2_hcd_remove(struct device *dev, struct dwc2_device *otg_dev);
+extern int dwc2_set_parameters(struct dwc2_hcd *hcd);
+
+/* Transaction Execution Functions */
+extern enum dwc2_transaction_type dwc2_hcd_select_transactions(
+						struct dwc2_hcd *hcd);
+extern void dwc2_hcd_queue_transactions(struct dwc2_hcd *hcd,
+					enum dwc2_transaction_type tr_type);
+
+/* Interrupt Handler Functions */
+extern int dwc2_hcd_handle_intr(struct dwc2_hcd *dwc2_hcd);
+
+/* Schedule Queue Functions */
+/* Implemented in hcd_queue.c */
+extern void dwc2_hcd_qh_free(struct dwc2_hcd *hcd, struct dwc2_qh *qh);
+extern int dwc2_hcd_qh_add(struct dwc2_hcd *hcd, struct dwc2_qh *qh);
+extern void dwc2_hcd_qh_remove(struct dwc2_hcd *hcd, struct dwc2_qh *qh);
+extern void dwc2_hcd_qh_deactivate(struct dwc2_hcd *hcd, struct dwc2_qh *qh,
+				   int sched_csplit);
+
+/* Remove and free a QH */
+static inline void dwc2_hcd_qh_remove_and_free(struct dwc2_hcd *hcd,
+					       struct dwc2_qh *qh)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&hcd->lock, flags);
+	dwc2_hcd_qh_remove(hcd, qh);
+	spin_unlock_irqrestore(&hcd->lock, flags);
+	dwc2_hcd_qh_free(hcd, qh);
+}
+
+extern void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb);
+extern int dwc2_hcd_qtd_add(struct dwc2_qtd *qtd, struct dwc2_hcd *dwc2_hcd,
+			    struct dwc2_qh **qh, gfp_t mem_flags);
+
+/* Removes and frees a QTD */
+static inline void dwc2_hcd_qtd_remove_and_free(struct dwc2_hcd *hcd,
+						struct dwc2_qtd *qtd,
+						struct dwc2_qh *qh)
+{
+	list_del_init(&qtd->qtd_list_entry);
+	kfree(qtd);
+}
+
+/* Descriptor DMA support functions */
+extern void dwc2_hcd_start_xfer_ddma(struct dwc2_hcd *hcd, struct dwc2_qh *qh);
+extern void dwc2_hcd_complete_xfer_ddma(struct dwc2_hcd *hcd,
+					struct dwc2_hc *hc, int chnum,
+					enum dwc2_halt_status halt_status);
+
+extern int dwc2_hcd_qh_init_ddma(struct dwc2_hcd *hcd, struct dwc2_qh *qh,
+				 gfp_t mem_flags);
+extern void dwc2_hcd_qh_free_ddma(struct dwc2_hcd *hcd, struct dwc2_qh *qh);
+
+/* Check if QH is non-periodic */
+#define dwc2_qh_is_non_per(_qh_ptr_) \
+	((_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_BULK || \
+	 (_qh_ptr_)->ep_type == USB_ENDPOINT_XFER_CONTROL)
+
+/* High bandwidth multiplier as encoded in highspeed endpoint descriptors */
+#define dwc2_hb_mult(wmaxpacketsize) (1 + (((wmaxpacketsize) >> 11) & 0x03))
+
+/* Packet size for any kind of endpoint descriptor */
+#define dwc2_max_packet(wmaxpacketsize) ((wmaxpacketsize) & 0x07ff)
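+/*
+ * Worked example: a high-bandwidth wMaxPacketSize of 0x1400 yields
+ * dwc2_max_packet(0x1400) = 1024 bytes per transaction and
+ * dwc2_hb_mult(0x1400) = 3 transactions per microframe.
+ */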
+
+/*
+ * Returns true if frame1 is less than or equal to frame2. The comparison is
+ * done modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the
+ * frame number when the max frame number is reached.
+ */
+static inline int dwc2_frame_num_le(u16 frame1, u16 frame2)
+{
+	return ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= (HFNUM_MAX_FRNUM >> 1);
+}
+
+/*
+ * Returns true if frame1 is greater than frame2. The comparison is done
+ * modulo HFNUM_MAX_FRNUM. This accounts for the rollover of the frame
+ * number when the max frame number is reached.
+ */
+static inline int dwc2_frame_num_gt(u16 frame1, u16 frame2)
+{
+	return (frame1 != frame2) &&
+	       ((frame1 - frame2) & HFNUM_MAX_FRNUM) < (HFNUM_MAX_FRNUM >> 1);
+}
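+/*
+ * Rollover example, assuming HFNUM_MAX_FRNUM is the 14-bit mask 0x3fff:
+ * dwc2_frame_num_le(0x3ffe, 0x0001) is true because
+ * (0x0001 - 0x3ffe) & 0x3fff = 0x0003, which is <= 0x1fff, so frame
+ * 0x0001 is correctly treated as coming after frame 0x3ffe.
+ */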
+
+/*
+ * Increments frame by the amount specified by inc. The addition is done
+ * modulo HFNUM_MAX_FRNUM. Returns the incremented value.
+ */
+static inline u16 dwc2_frame_num_inc(u16 frame, u16 inc)
+{
+	return (frame + inc) & HFNUM_MAX_FRNUM;
+}
+
+static inline u16 dwc2_full_frame_num(u16 frame)
+{
+	return (frame & HFNUM_MAX_FRNUM) >> 3;
+}
+
+static inline u16 dwc2_micro_frame_num(u16 frame)
+{
+	return frame & 0x7;
+}
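+/*
+ * For example, (micro)frame number 0x1234 corresponds to full (1 ms) frame
+ * dwc2_full_frame_num(0x1234) = 0x246 and microframe
+ * dwc2_micro_frame_num(0x1234) = 4 within that frame.
+ */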
+
+/*
+ * Returns the Core Interrupt Status register contents, ANDed with the Core
+ * Interrupt Mask register contents
+ */
+static inline u32 dwc2_read_core_intr(struct dwc2_hcd *hcd)
+{
+	return readl(hcd->regs + GINTSTS) & readl(hcd->regs + GINTMSK);
+}
+
+/**
+ * dwc2_pcd_start() - Starts the PCD
+ *
+ * @hcd: Programming view of DWC_otg controller
+ */
+static inline void dwc2_pcd_start(struct dwc2_hcd *hcd)
+{
+}
+
+/**
+ * dwc2_pcd_stop() - Stops the PCD
+ *
+ * @hcd: Programming view of DWC_otg controller
+ */
+static inline void dwc2_pcd_stop(struct dwc2_hcd *hcd)
+{
+}
+
+/**
+ * dwc2_pcd_suspend() - Suspends the PCD
+ *
+ * @hcd: Programming view of DWC_otg controller
+ */
+static inline void dwc2_pcd_suspend(struct dwc2_hcd *hcd)
+{
+}
+
+/**
+ * dwc2_pcd_resume() - Resumes the PCD
+ *
+ * @hcd: Programming view of DWC_otg controller
+ */
+static inline void dwc2_pcd_resume(struct dwc2_hcd *hcd)
+{
+}
+
+static inline u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb)
+{
+	return dwc2_urb->status;
+}
+
+static inline u32 dwc2_hcd_urb_get_actual_length(struct dwc2_hcd_urb *dwc2_urb)
+{
+	return dwc2_urb->actual_length;
+}
+
+static inline u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb)
+{
+	return dwc2_urb->error_count;
+}
+
+static inline void dwc2_hcd_urb_set_iso_desc_params(
+		struct dwc2_hcd_urb *dwc2_urb, int desc_num, u32 offset,
+		u32 length)
+{
+	dwc2_urb->iso_descs[desc_num].offset = offset;
+	dwc2_urb->iso_descs[desc_num].length = length;
+}
+
+static inline u32 dwc2_hcd_urb_get_iso_desc_status(
+		struct dwc2_hcd_urb *dwc2_urb, int desc_num)
+{
+	return dwc2_urb->iso_descs[desc_num].status;
+}
+
+static inline u32 dwc2_hcd_urb_get_iso_desc_actual_length(
+		struct dwc2_hcd_urb *dwc2_urb, int desc_num)
+{
+	return dwc2_urb->iso_descs[desc_num].actual_length;
+}
+
+static inline int dwc2_hcd_is_bandwidth_allocated(struct dwc2_hcd *hcd,
+						  struct usb_host_endpoint *ep)
+{
+	struct dwc2_qh *qh = ep->hcpriv;
+
+	if (qh && !list_empty(&qh->qh_list_entry))
+		return 1;
+
+	return 0;
+}
+
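+/*
+ * Returns the bandwidth reserved for the endpoint, taken from the qh->usecs
+ * field, i.e. the (micro)frame time in microseconds that the periodic
+ * scheduler has allocated to this endpoint
+ */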
+static inline u8 dwc2_hcd_get_ep_bandwidth(struct dwc2_hcd *hcd,
+					   struct usb_host_endpoint *ep)
+{
+	struct dwc2_qh *qh = ep->hcpriv;
+
+	if (!qh) {
+		WARN_ON(1);
+		return 0;
+	}
+
+	return qh->usecs;
+}
+
+extern void dwc2_hcd_save_data_toggle(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				      int chnum, struct dwc2_qtd *qtd);
+
+/* HCD Core API */
+
+/**
+ * dwc2_hcd_handle_intr() - Called on every hardware interrupt
+ *
+ * @dwc2_hcd: The HCD
+ *
+ * Returns non-zero if the interrupt is handled
+ * Returns 0 if the interrupt is not handled
+ */
+extern int dwc2_hcd_handle_intr(struct dwc2_hcd *dwc2_hcd);
+
+/**
+ * dwc2_hcd_get_priv_data() - Returns private data set by
+ * dwc2_hcd_set_priv_data() function
+ *
+ * @hcd: The HCD
+ */
+extern struct usb_hcd *dwc2_hcd_get_priv_data(struct dwc2_hcd *hcd);
+
+/**
+ * dwc2_hcd_set_priv_data() - Sets private data
+ *
+ * @hcd:       The HCD
+ * @priv_data: Pointer to be stored in private data
+ */
+extern void dwc2_hcd_set_priv_data(struct dwc2_hcd *hcd,
+				   struct usb_hcd *priv_data);
+
+/**
+ * dwc2_hcd_startup() - Starts the DWC_otg host mode operation
+ *
+ * @hcd:  The HCD
+ *
+ * Returns -ENODEV if the core is currently in device mode
+ * Returns 0 on success
+ */
+extern int dwc2_hcd_startup(struct dwc2_hcd *hcd);
+
+/**
+ * dwc2_hcd_stop() - Halts the DWC_otg host mode operation
+ *
+ * @hcd: The HCD
+ */
+extern void dwc2_hcd_stop(struct dwc2_hcd *hcd);
+
+extern void dwc2_hcd_start(struct dwc2_hcd *hcd);
+extern void dwc2_hcd_disconnect(struct dwc2_hcd *hcd);
+extern void dwc2_hcd_session_start(struct dwc2_hcd *hcd);
+extern void dwc2_hcd_rem_wakeup(struct dwc2_hcd *hcd);
+extern void dwc2_hcd_sleep(struct dwc2_hcd *hcd);
+
+/**
+ * dwc2_hcd_otg_port() - Returns otg port number
+ *
+ * @hcd: The HCD
+ */
+extern u32 dwc2_hcd_otg_port(struct dwc2_hcd *hcd);
+
+/**
+ * dwc2_hcd_is_b_host() - Returns 1 if the core is currently acting as B host,
+ * and 0 otherwise
+ *
+ * @hcd: The HCD
+ */
+extern int dwc2_hcd_is_b_host(struct dwc2_hcd *hcd);
+
+/**
+ * dwc2_hcd_get_frame_number() - Returns current frame number
+ *
+ * @hcd: The HCD
+ */
+extern int dwc2_hcd_get_frame_number(struct dwc2_hcd *hcd);
+
+/**
+ * dwc2_hcd_dump_state() - Dumps hcd state
+ *
+ * @hcd: The HCD
+ */
+extern void dwc2_hcd_dump_state(struct dwc2_hcd *hcd);
+
+/**
+ * dwc2_hcd_dump_frrem() - Dumps the average frame remaining at SOF
+ *
+ * @hcd: The HCD
+ *
+ * This can be used to determine average interrupt latency. Frame remaining is
+ * also shown for start transfer and two additional sample points.
+ */
+extern void dwc2_hcd_dump_frrem(struct dwc2_hcd *hcd);
+
+/* URB interface */
+
+/* Transfer flags */
+#define URB_GIVEBACK_ASAP	0x1
+#define URB_SEND_ZERO_PACKET	0x2
+
+/**
+ * dwc2_hcd_urb_get_status() - Gets status from dwc2_hcd_urb
+ *
+ * @dwc2_urb: DWC_otg URB
+ */
+extern u32 dwc2_hcd_urb_get_status(struct dwc2_hcd_urb *dwc2_urb);
+
+/**
+ * dwc2_hcd_urb_get_actual_length() - Gets actual length from dwc2_hcd_urb
+ *
+ * @dwc2_urb: DWC_otg URB
+ */
+extern u32 dwc2_hcd_urb_get_actual_length(struct dwc2_hcd_urb *dwc2_urb);
+
+/**
+ * dwc2_hcd_urb_get_error_count() - Gets error count from dwc2_hcd_urb. Only
+ * for ISOC URBs.
+ *
+ * @dwc2_urb: DWC_otg URB
+ */
+extern u32 dwc2_hcd_urb_get_error_count(struct dwc2_hcd_urb *dwc2_urb);
+
+/**
+ * dwc2_hcd_urb_get_iso_desc_status() - Gets status from ISOC descriptor
+ * specified by desc_num
+ *
+ * @dwc2_urb: DWC_otg URB
+ * @desc_num:  ISOC descriptor number
+ */
+extern u32 dwc2_hcd_urb_get_iso_desc_status(struct dwc2_hcd_urb *dwc2_urb,
+					    int desc_num);
+
+/**
+ * dwc2_hcd_urb_get_iso_desc_actual_length() - Gets actual data length from
+ * ISOC descriptor specified by desc_num
+ *
+ * @dwc2_urb: DWC_otg URB
+ * @desc_num:  ISOC descriptor number
+ */
+extern u32 dwc2_hcd_urb_get_iso_desc_actual_length(
+		struct dwc2_hcd_urb *dwc2_urb, int desc_num);
+
+/**
+ * dwc2_hcd_urb_enqueue() - Queues URB. After transfer completes, the
+ * completion callback will be called with the URB status.
+ *
+ * @dwc2_hcd:  The HCD
+ * @dwc2_urb:  DWC_otg URB
+ * @ep_handle: Out parameter for returning endpoint handle
+ * @mem_flags: Allocation flags
+ *
+ * Returns -ENODEV if no device is connected
+ * Returns -ENOMEM if there is not enough memory
+ * Returns 0 on success
+ */
+extern int dwc2_hcd_urb_enqueue(struct dwc2_hcd *dwc2_hcd,
+				struct dwc2_hcd_urb *dwc2_urb,
+				void **ep_handle, gfp_t mem_flags);
+
+/**
+ * dwc2_hcd_urb_dequeue() - De-queues the specified URB
+ *
+ * @dwc2_hcd: The HCD
+ * @dwc2_urb: DWC_otg URB
+ */
+extern int dwc2_hcd_urb_dequeue(struct dwc2_hcd *dwc2_hcd,
+				struct dwc2_hcd_urb *dwc2_urb);
+
+/**
+ * dwc2_hcd_endpoint_disable() - Frees resources in the DWC_otg controller
+ * related to a given endpoint. Any URBs for the endpoint must already be
+ * dequeued.
+ *
+ * @hcd:   The HCD
+ * @ep:    Endpoint handle, as returned by the dwc2_hcd_urb_enqueue() function
+ * @retry: Number of retries if there are queued transfers
+ *
+ * Returns -EINVAL if invalid arguments are passed
+ * Returns 0 on success
+ */
+extern int dwc2_hcd_endpoint_disable(struct dwc2_hcd *hcd,
+				     struct usb_host_endpoint *ep, int retry);
+
+/**
+ * dwc2_hcd_endpoint_reset() - Resets the data toggle in the QH structure.
+ * This function can be called from the usb_clear_halt() routine.
+ *
+ * @hcd: The HCD
+ * @ep:  Endpoint handle, as returned by the dwc2_hcd_urb_enqueue() function
+ *
+ * Returns -EINVAL if invalid arguments are passed
+ * Returns 0 on success
+ */
+extern int dwc2_hcd_endpoint_reset(struct dwc2_hcd *hcd,
+				   struct usb_host_endpoint *ep);
+
+/**
+ * dwc2_hcd_is_status_changed() - Returns 1 if the status of the specified
+ * port has changed, and 0 otherwise
+ *
+ * @hcd:  The HCD
+ * @port: Port number
+ */
+extern int dwc2_hcd_is_status_changed(struct dwc2_hcd *hcd, int port);
+
+/* Host driver callbacks */
+
+extern void dwc2_host_start(struct dwc2_hcd *hcd);
+extern void dwc2_host_disconnect(struct dwc2_hcd *hcd);
+extern void dwc2_host_hub_info(struct dwc2_hcd *hcd, void *context,
+			       u32 *hub_addr, u32 *port_addr);
+extern int dwc2_host_speed(struct dwc2_hcd *hcd, void *context);
+extern int dwc2_host_get_b_hnp_enable(struct dwc2_hcd *hcd);
+extern void dwc2_host_complete(struct dwc2_hcd *hcd, void *context,
+			       struct dwc2_hcd_urb *dwc2_urb, int status);
+
+#ifdef DEBUG
+/*
+ * Macro to sample the remaining PHY clocks left in the current frame. This
+ * may be used during debugging to determine the average time it takes to
+ * execute sections of code. There are two possible sample points, "a" and
+ * "b", so the _letter_ argument must be one of these values.
+ *
+ * To dump the average sample times, read the "hcd_frrem" sysfs attribute. For
+ * example, "cat /sys/devices/lm0/hcd_frrem".
+ */
+#define dwc2_sample_frrem(_hcd_, _qh_, _letter_)			\
+do {									\
+	struct hfnum_data _hfnum_;					\
+	struct dwc2_qtd *_qtd_;						\
+									\
+	_qtd_ = list_entry((_qh_)->qtd_list.next, struct dwc2_qtd,	\
+			   qtd_list_entry);				\
+	if (usb_pipeint(_qtd_->urb->pipe) &&				\
+	    (_qh_)->start_split_frame != 0 && !_qtd_->complete_split) {	\
+		_hfnum_.d32 = readl((_hcd_)->regs + HFNUM);		\
+		switch (_hfnum_.b.frnum & 0x7) {			\
+		case 7:							\
+			(_hcd_)->hfnum_7_samples_##_letter_++;		\
+			(_hcd_)->hfnum_7_frrem_accum_##_letter_ +=	\
+				_hfnum_.b.frrem;			\
+			break;						\
+		case 0:							\
+			(_hcd_)->hfnum_0_samples_##_letter_++;		\
+			(_hcd_)->hfnum_0_frrem_accum_##_letter_ +=	\
+				_hfnum_.b.frrem;			\
+			break;						\
+		default:						\
+			(_hcd_)->hfnum_other_samples_##_letter_++;	\
+			(_hcd_)->hfnum_other_frrem_accum_##_letter_ +=	\
+				_hfnum_.b.frrem;			\
+			break;						\
+		}							\
+	}								\
+} while (0)
+#else
+#define dwc2_sample_frrem(_hcd_, _qh_, _letter_)	do {} while (0)
+#endif
+
+#endif /* __DWC_HCD_H__ */
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
new file mode 100644
index 0000000..20f55e0
--- /dev/null
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -0,0 +1,2029 @@
+/*
+ * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
+ *
+ * Copyright (C) 2004-2012 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains the interrupt handlers for Host mode
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/ch9.h>
+
+#include "core.h"
+#include "hcd.h"
+
+#ifdef DWC_TRACK_MISSED_SOFS
+#warning Compiling code to track missed SOFs
+#define FRAME_NUM_ARRAY_SIZE 1000
+
+/* This function is for debug only */
+static void track_missed_sofs(struct dwc2_hcd *hcd)
+{
+	static u16 frame_num_array[FRAME_NUM_ARRAY_SIZE];
+	static u16 last_frame_num_array[FRAME_NUM_ARRAY_SIZE];
+	static int frame_num_idx;
+	static u16 last_frame_num = HFNUM_MAX_FRNUM;
+	static int dumped_frame_num_array;
+	u16 curr_frame_number = hcd->frame_number;
+
+	if (frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
+		if (((last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
+		    curr_frame_number) {
+			frame_num_array[frame_num_idx] = curr_frame_number;
+			last_frame_num_array[frame_num_idx++] = last_frame_num;
+		}
+	} else if (!dumped_frame_num_array) {
+		int i;
+
+		dev_info(hcd->dev, "Frame     Last Frame\n");
+		dev_info(hcd->dev, "-----     ----------\n");
+		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
+			dev_info(hcd->dev, "0x%04x    0x%04x\n",
+				 frame_num_array[i], last_frame_num_array[i]);
+		}
+		dumped_frame_num_array = 1;
+	}
+	last_frame_num = curr_frame_number;
+}
+#endif
+
+/*
+ * Handles the start-of-frame interrupt in host mode. Non-periodic
+ * transactions may be queued to the DWC_otg controller for the current
+ * (micro)frame. Periodic transactions may be queued to the controller
+ * for the next (micro)frame.
+ */
+static int handle_sof_intr(struct dwc2_hcd *hcd)
+{
+	struct list_head *qh_entry, *qh_tmp;
+	struct dwc2_qh *qh;
+	u32 hfnum;
+	u32 gintsts;
+	enum dwc2_transaction_type tr_type;
+
+#ifdef DEBUG_SOF
+	dev_dbg(hcd->dev, "--Start of Frame Interrupt--\n");
+#endif
+
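+	/*
+	 * Read the current frame number. Note that shifts bind tighter than
+	 * '&', so the expression below extracts the FrNum field exactly as
+	 * (hfnum & HFNUM_FRNUM_MASK) >> HFNUM_FRNUM_SHIFT would; the same
+	 * unparenthesized field-extraction idiom is used throughout this file.
+	 */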
+	hfnum = readl(hcd->regs + HFNUM);
+	hcd->frame_number = hfnum >> HFNUM_FRNUM_SHIFT &
+			    HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT;
+
+#ifdef DWC_TRACK_MISSED_SOFS
+	track_missed_sofs(hcd);
+#endif
+	/* Determine whether any periodic QHs should be executed */
+	list_for_each_safe(qh_entry, qh_tmp, &hcd->periodic_sched_inactive) {
+		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
+		if (dwc2_frame_num_le(qh->sched_frame, hcd->frame_number))
+			/*
+			 * Move QH to the ready list to be executed next
+			 * (micro)frame
+			 */
+			list_move(&qh->qh_list_entry,
+				  &hcd->periodic_sched_ready);
+	}
+	tr_type = dwc2_hcd_select_transactions(hcd);
+	if (tr_type != DWC2_TRANSACTION_NONE)
+		dwc2_hcd_queue_transactions(hcd, tr_type);
+
+	/* Clear interrupt */
+	gintsts = readl(hcd->regs + GINTSTS);
+	gintsts |= GINTSTS_SOF;
+	writel(gintsts, hcd->regs + GINTSTS);
+	return 1;
+}
+
+/*
+ * Handles the Rx Status Queue Level Interrupt, which indicates that there is
+ * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
+ * memory if the DWC_otg controller is operating in Slave mode.
+ */
+static int handle_rx_status_q_level_intr(struct dwc2_hcd *hcd)
+{
+	u32 grxsts, chnum, bcnt, dpid, pktsts;
+	struct dwc2_hc *hc;
+
+	dev_dbg(hcd->dev, "--RxStsQ Level Interrupt--\n");
+
+	grxsts = readl(hcd->regs + GRXSTSP);
+
+	chnum = grxsts >> GRXSTS_HChNum_SHIFT &
+		GRXSTS_HChNum_MASK >> GRXSTS_HChNum_SHIFT;
+	hc = hcd->hc_ptr_array[chnum];
+	if (!hc) {
+		dev_err(hcd->dev, "Unable to get corresponding channel\n");
+		return 0;
+	}
+
+	bcnt = grxsts >> GRXSTS_ByteCnt_SHIFT &
+	       GRXSTS_ByteCnt_MASK >> GRXSTS_ByteCnt_SHIFT;
+	dpid = grxsts >> GRXSTS_DPID_SHIFT &
+	       GRXSTS_DPID_MASK >> GRXSTS_DPID_SHIFT;
+	pktsts = grxsts & GRXSTS_PktSts_MASK;
+
+	/* Packet Status */
+	dev_dbg(hcd->dev, "    Ch num = %d\n", chnum);
+	dev_dbg(hcd->dev, "    Count = %d\n", bcnt);
+	dev_dbg(hcd->dev, "    DPID = %d, hc.dpid = %d\n", dpid,
+		hc->data_pid_start);
+	dev_dbg(hcd->dev, "    PStatus = %d\n",
+		pktsts >> GRXSTS_PktSts_SHIFT &
+		GRXSTS_PktSts_MASK >> GRXSTS_PktSts_SHIFT);
+
+	switch (pktsts) {
+	case GRXSTS_PktSts_HChIn:
+		/* Read the data into the host buffer */
+		if (bcnt > 0) {
+			dwc2_read_packet(hcd, hc->xfer_buff, bcnt);
+
+			/* Update the HC fields for the next packet received */
+			hc->xfer_count += bcnt;
+			hc->xfer_buff += bcnt;
+		}
+		break;
+
+	case GRXSTS_PktSts_HChIn_XFER_COMP:
+	case GRXSTS_PktSts_DataToggleErr:
+	case GRXSTS_PktSts_HChHalted:
+		/* Handled in interrupt, just ignore data */
+		break;
+	default:
+		dev_err(hcd->dev, "RX_STS_Q Interrupt: Unknown status %d\n",
+			pktsts);
+		break;
+	}
+
+	return 1;
+}
+
+/*
+ * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
+ * data packets may be written to the FIFO for OUT transfers. More requests
+ * may be written to the non-periodic request queue for IN transfers. This
+ * interrupt is enabled only in Slave mode.
+ */
+static int handle_np_tx_fifo_empty_intr(struct dwc2_hcd *hcd)
+{
+	dev_dbg(hcd->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
+	dwc2_hcd_queue_transactions(hcd, DWC2_TRANSACTION_NON_PERIODIC);
+	return 1;
+}
+
+/*
+ * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
+ * packets may be written to the FIFO for OUT transfers. More requests may be
+ * written to the periodic request queue for IN transfers. This interrupt is
+ * enabled only in Slave mode.
+ */
+static int handle_perio_tx_fifo_empty_intr(struct dwc2_hcd *hcd)
+{
+	dev_dbg(hcd->dev, "--Periodic TxFIFO Empty Interrupt--\n");
+	dwc2_hcd_queue_transactions(hcd, DWC2_TRANSACTION_PERIODIC);
+	return 1;
+}
+
+static void handle_hprt0_enable(struct dwc2_hcd *hcd, u32 hprt0,
+				u32 *hprt0_modify)
+{
+	struct dwc2_core_params *params = hcd->core_params;
+	int do_reset = 0;
+	u32 usbcfg;
+	u32 prtspd;
+	u32 hcfg;
+	u32 hfir;
+
+	dev_dbg(hcd->dev, "%s(%p)\n", __func__, hcd);
+
+	/* Recalculate HFIR.FrInterval every time the port is enabled */
+	hfir = readl(hcd->regs + HFIR);
+	hfir &= ~HFIR_FRINT_MASK;
+	hfir |= dwc2_calc_frame_interval(hcd) << HFIR_FRINT_SHIFT &
+		HFIR_FRINT_MASK;
+	writel(hfir, hcd->regs + HFIR);
+
+	/*
+	 * Check if we need to adjust the PHY clock speed for low power
+	 * and adjust it if so
+	 */
+	if (params->host_support_fs_ls_low_power) {
+		usbcfg = readl(hcd->regs + GUSBCFG);
+		prtspd = hprt0 & HPRT0_SPD_MASK;
+
+		if (prtspd == HPRT0_SPD_LOW_SPEED ||
+		    prtspd == HPRT0_SPD_FULL_SPEED) {
+			/* Low power */
+			if (!(usbcfg & GUSBCFG_PHYLPClkSel)) {
+				/*
+				 * Set PHY low power clock select for FS/LS
+				 * devices
+				 */
+				usbcfg |= GUSBCFG_PHYLPClkSel;
+				writel(usbcfg, hcd->regs + GUSBCFG);
+				do_reset = 1;
+			}
+
+			hcfg = readl(hcd->regs + HCFG);
+
+			if (prtspd == HPRT0_SPD_LOW_SPEED &&
+			    params->host_ls_low_power_phy_clk ==
+			    DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
+				/* 6 MHZ */
+				dev_dbg(hcd->dev,
+					"FS_PHY programming HCFG to 6 MHz\n");
+				if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
+				    HCFG_FSLSPCLKSEL_6_MHZ) {
+					hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+					hcfg |= HCFG_FSLSPCLKSEL_6_MHZ;
+					writel(hcfg, hcd->regs + HCFG);
+					do_reset = 1;
+				}
+			} else {
+				/* 48 MHZ */
+				dev_dbg(hcd->dev,
+					"FS_PHY programming HCFG to 48 MHz\n");
+				if ((hcfg & HCFG_FSLSPCLKSEL_MASK) !=
+				    HCFG_FSLSPCLKSEL_48_MHZ) {
+					hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
+					hcfg |= HCFG_FSLSPCLKSEL_48_MHZ;
+					writel(hcfg, hcd->regs + HCFG);
+					do_reset = 1;
+				}
+			}
+		} else {
+			/* Not low power */
+			if (usbcfg & GUSBCFG_PHYLPClkSel) {
+				usbcfg &= ~GUSBCFG_PHYLPClkSel;
+				writel(usbcfg, hcd->regs + GUSBCFG);
+				do_reset = 1;
+			}
+		}
+
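+		/*
+		 * If the PHY clock selection was changed above, the port must
+		 * be reset for the new clock setting to take effect. The
+		 * reset is deferred to the workqueue rather than issued here
+		 * in interrupt context.
+		 */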
+		if (do_reset) {
+			*hprt0_modify |= HPRT0_RST;
+			queue_delayed_work(hcd->wq_otg, &hcd->reset_work,
+					   msecs_to_jiffies(60));
+		}
+	}
+
+	if (!do_reset)
+		/* Port has been enabled, set the reset change flag */
+		hcd->flags.b.port_reset_change = 1;
+}
+
+/*
+ * There are multiple conditions that can cause a port interrupt. This function
+ * determines which interrupt conditions have occurred and handles them
+ * appropriately.
+ */
+static int handle_port_intr(struct dwc2_hcd *hcd)
+{
+	int retval = 0;
+	u32 hprt0;
+	u32 hprt0_modify;
+
+	dev_dbg(hcd->dev, "--Port Interrupt--\n");
+
+	hprt0 = readl(hcd->regs + HPRT0);
+	hprt0_modify = hprt0;
+
+	/*
+	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
+	 * GINTSTS
+	 */
+	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
+			  HPRT0_OVRCURRCHG);
+
+	/*
+	 * Port Connect Detected
+	 * Set flag and clear if detected
+	 */
+	if (hprt0 & HPRT0_CONNDET) {
+		dev_dbg(hcd->dev,
+			"--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
+			hprt0);
+		hcd->flags.b.port_connect_status_change = 1;
+		hcd->flags.b.port_connect_status = 1;
+		hprt0_modify |= HPRT0_CONNDET;
+
+		/* B-Device has connected, delete connection timer */
+		del_timer(&hcd->conn_timer);
+
+		/*
+		 * The hub driver asserts a reset when it sees the port
+		 * connect status change flag
+		 */
+		retval |= 1;
+	}
+
+	/*
+	 * Port Enable Changed
+	 * Clear if detected - Set internal flag if disabled
+	 */
+	if (hprt0 & HPRT0_ENACHG) {
+		dev_dbg(hcd->dev,
+			"  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
+			hprt0, !!(hprt0 & HPRT0_ENA));
+		hprt0_modify |= HPRT0_ENACHG;
+		if (hprt0 & HPRT0_ENA)
+			handle_hprt0_enable(hcd, hprt0, &hprt0_modify);
+		else
+			hcd->flags.b.port_enable_change = 1;
+		retval |= 1;
+	}
+
+	/* Overcurrent Change Interrupt */
+	if (hprt0 & HPRT0_OVRCURRCHG) {
+		dev_dbg(hcd->dev,
+			"  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
+			hprt0);
+		hcd->flags.b.port_over_current_change = 1;
+		hprt0_modify |= HPRT0_OVRCURRCHG;
+		retval |= 1;
+	}
+
+	/* Clear Port Interrupts */
+	writel(hprt0_modify, hcd->regs + HPRT0);
+
+	return retval;
+}
+
+/*
+ * Gets the actual length of a transfer after the transfer halts. halt_status
+ * holds the reason for the halt.
+ *
+ * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
+ * is set to 1 upon return if less than the requested number of bytes were
+ * transferred. short_read may also be NULL on entry, in which case it remains
+ * unchanged.
+ */
+static u32 get_actual_xfer_length(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				  int chnum, struct dwc2_qtd *qtd,
+				  enum dwc2_halt_status halt_status,
+				  int *short_read)
+{
+	u32 hctsiz, count, length;
+
+	hctsiz = readl(hcd->regs + HCTSIZ(chnum));
+
+	if (halt_status == DWC2_HC_XFER_COMPLETE) {
+		if (hc->ep_is_in) {
+			count = hctsiz >> TSIZ_XFERSIZE_SHIFT &
+				TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT;
+			length = hc->xfer_len - count;
+			if (short_read != NULL)
+				*short_read = (count != 0);
+		} else if (hc->qh->do_split) {
+			length = qtd->ssplit_out_xfer_count;
+		} else {
+			length = hc->xfer_len;
+		}
+	} else {
+		/*
+		 * Must use the hctsiz.pktcnt field to determine how much data
+		 * has been transferred. This field reflects the number of
+		 * packets that have been transferred via the USB. This is
+		 * always an integral number of packets if the transfer was
+		 * halted before its normal completion. (Can't use the
+		 * hctsiz.xfersize field because that reflects the number of
+		 * bytes transferred via the AHB, not the USB).
+		 */
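+		/*
+		 * Example: if 10 packets were programmed, hctsiz.pktcnt reads
+		 * back as 4, and max_packet is 512, then (10 - 4) * 512 =
+		 * 3072 bytes completed on the USB before the halt.
+		 */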
+		count = hctsiz >> TSIZ_PKTCNT_SHIFT &
+			TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT;
+		length = (hc->start_pkt_count - count) * hc->max_packet;
+	}
+
+	return length;
+}
+
+/**
+ * update_urb_state_xfer_comp() - Updates the state of the URB after a Transfer
+ * Complete interrupt on the host channel. Updates the actual_length field of
+ * the URB based on the number of bytes transferred via the host channel. Sets
+ * the URB status if the data transfer is finished.
+ *
+ * Return: 1 if the data transfer specified by the URB is completely finished,
+ * 0 otherwise
+ */
+static int update_urb_state_xfer_comp(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				      int chnum, struct dwc2_hcd_urb *urb,
+				      struct dwc2_qtd *qtd)
+{
+	int xfer_done = 0;
+	int short_read = 0;
+	int xfer_length = get_actual_xfer_length(hcd, hc, chnum, qtd,
+						 DWC2_HC_XFER_COMPLETE,
+						 &short_read);
+
+	/* Non DWORD-aligned buffer case handling */
+	if (hc->align_buff && xfer_length && hc->ep_is_in) {
+		dev_dbg(hcd->dev, "non-aligned buffer\n");
+		memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
+		       xfer_length);
+	}
+
+	dev_dbg(hcd->dev, "urb->actual_length=%d xfer_length=%d\n",
+		urb->actual_length, xfer_length);
+	urb->actual_length += xfer_length;
+
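+	/*
+	 * If the transfer ended exactly on a max-packet boundary and the URB
+	 * requested a trailing zero-length packet, the transfer is not done
+	 * yet; the ZLP still has to be sent, so don't mark the URB complete
+	 */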
+	if (xfer_length && hc->ep_type == DWC2_EP_TYPE_BULK &&
+	    (urb->flags & URB_SEND_ZERO_PACKET) &&
+	    urb->actual_length == urb->length &&
+	    !(urb->length % hc->max_packet)) {
+		xfer_done = 0;
+	} else if (short_read || urb->actual_length == urb->length) {
+		xfer_done = 1;
+		urb->status = 0;
+	}
+
+#ifdef DEBUG
+	{
+		u32 hctsiz = readl(hcd->regs + HCTSIZ(chnum));
+
+		dev_dbg(hcd->dev, "DWC_otg: %s: %s, channel %d\n",
+			__func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num);
+		dev_dbg(hcd->dev, "  hc->xfer_len %d\n", hc->xfer_len);
+		dev_dbg(hcd->dev, "  hctsiz.xfersize %d\n",
+			hctsiz >> TSIZ_XFERSIZE_SHIFT &
+			TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT);
+		dev_dbg(hcd->dev, "  urb->transfer_buffer_length %d\n",
+			urb->length);
+		dev_dbg(hcd->dev, "  urb->actual_length %d\n",
+			urb->actual_length);
+		dev_dbg(hcd->dev, "  short_read %d, xfer_done %d\n",
+			short_read, xfer_done);
+	}
+#endif
+
+	return xfer_done;
+}
+
+/*
+ * Save the starting data toggle for the next transfer. The data toggle is
+ * saved in the QH for non-control transfers and it's saved in the QTD for
+ * control transfers.
+ */
+void dwc2_hcd_save_data_toggle(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+			       int chnum, struct dwc2_qtd *qtd)
+{
+	u32 hctsiz = readl(hcd->regs + HCTSIZ(chnum));
+	u32 pid = hctsiz & TSIZ_SC_MC_PID_MASK;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	if (hc->ep_type != DWC2_EP_TYPE_CONTROL) {
+		if (pid == TSIZ_SC_MC_PID_DATA0)
+			hc->qh->data_toggle = DWC2_HC_PID_DATA0;
+		else
+			hc->qh->data_toggle = DWC2_HC_PID_DATA1;
+	} else {
+		if (pid == TSIZ_SC_MC_PID_DATA0)
+			qtd->data_toggle = DWC2_HC_PID_DATA0;
+		else
+			qtd->data_toggle = DWC2_HC_PID_DATA1;
+	}
+}
+
+/**
+ * update_isoc_urb_state() - Updates the state of an Isochronous URB when the
+ * transfer is stopped for any reason. The fields of the current entry in the
+ * frame descriptor array are set based on the transfer state and the input
+ * halt_status. Completes the Isochronous URB if all the URB frames have been
+ * completed.
+ *
+ * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
+ * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
+ */
+static enum dwc2_halt_status update_isoc_urb_state(struct dwc2_hcd *hcd,
+		struct dwc2_hc *hc, int chnum, struct dwc2_qtd *qtd,
+		enum dwc2_halt_status halt_status)
+{
+	struct dwc2_hcd_urb *urb = qtd->urb;
+	enum dwc2_halt_status ret_val = halt_status;
+	struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
+	switch (halt_status) {
+	case DWC2_HC_XFER_COMPLETE:
+		frame_desc->status = 0;
+		frame_desc->actual_length = get_actual_xfer_length(hcd, hc,
+					chnum, qtd, halt_status, NULL);
+
+		/* Non DWORD-aligned buffer case handling */
+		if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in)
+			memcpy(urb->buf + frame_desc->offset +
+			       qtd->isoc_split_offset, hc->qh->dw_align_buf,
+			       frame_desc->actual_length);
+		break;
+	case DWC2_HC_XFER_FRAME_OVERRUN:
+		urb->error_count++;
+		if (hc->ep_is_in)
+			frame_desc->status = -ENOSR;
+		else
+			frame_desc->status = -ECOMM;
+		frame_desc->actual_length = 0;
+		break;
+	case DWC2_HC_XFER_BABBLE_ERR:
+		urb->error_count++;
+		frame_desc->status = -EOVERFLOW;
+		/* Don't need to update actual_length in this case */
+		break;
+	case DWC2_HC_XFER_XACT_ERR:
+		urb->error_count++;
+		frame_desc->status = -EPROTO;
+		frame_desc->actual_length = get_actual_xfer_length(hcd, hc,
+						chnum, qtd, halt_status, NULL);
+
+		/* Non DWORD-aligned buffer case handling */
+		if (hc->align_buff && frame_desc->actual_length && hc->ep_is_in)
+			memcpy(urb->buf + frame_desc->offset +
+			       qtd->isoc_split_offset, hc->qh->dw_align_buf,
+			       frame_desc->actual_length);
+
+		/* Skip whole frame */
+		if (hc->qh->do_split && (hc->ep_type == DWC2_EP_TYPE_ISOC) &&
+		    hc->ep_is_in && hcd->dma_enable) {
+			qtd->complete_split = 0;
+			qtd->isoc_split_offset = 0;
+		}
+
+		break;
+	default:
+		dev_err(hcd->dev, "Unhandled halt_status (%d)\n", halt_status);
+		break;
+	}
+	if (++qtd->isoc_frame_index == urb->packet_count) {
+		/*
+		 * urb->status is not used for isoc transfers. The individual
+		 * frame_desc statuses are used instead.
+		 */
+		dwc2_host_complete(hcd, urb->priv, urb, 0);
+		ret_val = DWC2_HC_XFER_URB_COMPLETE;
+	} else {
+		ret_val = DWC2_HC_XFER_COMPLETE;
+	}
+	return ret_val;
+}
+
+/*
+ * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
+ * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
+ * still linked to the QH, the QH is added to the end of the inactive
+ * non-periodic schedule. For periodic QHs, removes the QH from the periodic
+ * schedule if no more QTDs are linked to the QH.
+ */
+static void deactivate_qh(struct dwc2_hcd *hcd, struct dwc2_qh *qh,
+			  int free_qtd)
+{
+	int continue_split = 0;
+	struct dwc2_qtd *qtd;
+
+	dev_dbg(hcd->dev, "  %s(%p,%p,%d)\n", __func__, hcd, qh, free_qtd);
+
+	if (list_empty(&qh->qtd_list)) {
+		dev_err(hcd->dev, "## QTD list empty ##\n");
+		return;
+	}
+
+	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
+
+	if (qtd->complete_split) {
+		continue_split = 1;
+	} else if (qtd->isoc_split_pos == DWC_HCSPLT_XACTPOS_MID ||
+		   qtd->isoc_split_pos == DWC_HCSPLT_XACTPOS_END) {
+		continue_split = 1;
+	}
+
+	if (free_qtd) {
+		dwc2_hcd_qtd_remove_and_free(hcd, qtd, qh);
+		continue_split = 0;
+	}
+
+	qh->channel = NULL;
+	dwc2_hcd_qh_deactivate(hcd, qh, continue_split);
+}
+
+/**
+ * release_channel() - Releases a host channel for use by other transfers
+ *
+ * @hcd:         The HCD state structure
+ * @hc:          The host channel to release
+ * @qtd:         The QTD associated with the host channel. This QTD may be
+ *               freed if the transfer is complete or an error has occurred.
+ * @halt_status: Reason the channel is being released. This status
+ *               determines the actions taken by this function.
+ *
+ * Also attempts to select and queue more transactions since at least one host
+ * channel is available.
+ */
+static void release_channel(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+			    struct dwc2_qtd *qtd,
+			    enum dwc2_halt_status halt_status)
+{
+	enum dwc2_transaction_type tr_type;
+	int free_qtd;
+
+	dev_dbg(hcd->dev, "  %s: channel %d, halt_status %d\n",
+		__func__, hc->hc_num, halt_status);
+
+	switch (halt_status) {
+	case DWC2_HC_XFER_URB_COMPLETE:
+		free_qtd = 1;
+		break;
+	case DWC2_HC_XFER_AHB_ERR:
+	case DWC2_HC_XFER_STALL:
+	case DWC2_HC_XFER_BABBLE_ERR:
+		free_qtd = 1;
+		break;
+	case DWC2_HC_XFER_XACT_ERR:
+		if (qtd->error_count >= 3) {
+			dev_dbg(hcd->dev,
+				"  Complete URB with transaction error\n");
+			free_qtd = 1;
+			qtd->urb->status = -EPROTO;
+			dwc2_host_complete(hcd, qtd->urb->priv, qtd->urb,
+					   -EPROTO);
+		} else {
+			free_qtd = 0;
+		}
+		break;
+	case DWC2_HC_XFER_URB_DEQUEUE:
+		/*
+		 * The QTD has already been removed and the QH has been
+		 * deactivated. Don't want to do anything except release the
+		 * host channel and try to queue more transfers.
+		 */
+		goto cleanup;
+	case DWC2_HC_XFER_NO_HALT_STATUS:
+		free_qtd = 0;
+		break;
+	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
+		dev_dbg(hcd->dev, "  Complete URB with I/O error\n");
+		free_qtd = 1;
+		qtd->urb->status = -EIO;
+		dwc2_host_complete(hcd, qtd->urb->priv, qtd->urb, -EIO);
+		break;
+	default:
+		free_qtd = 0;
+		break;
+	}
+
+	deactivate_qh(hcd, hc->qh, free_qtd);
+
+cleanup:
+	/*
+	 * Release the host channel for use by other transfers. The cleanup
+	 * function clears the channel interrupt enables and conditions, so
+	 * there's no need to clear the Channel Halted interrupt separately.
+	 */
+	dwc2_hc_cleanup(hcd, hc);
+	list_add_tail(&hc->hc_list_entry, &hcd->free_hc_list);
+
+	switch (hc->ep_type) {
+	case DWC2_EP_TYPE_CONTROL:
+	case DWC2_EP_TYPE_BULK:
+		hcd->non_periodic_channels--;
+		break;
+
+	default:
+		/*
+		 * Don't release reservations for periodic channels here.
+		 * That's done when a periodic transfer is descheduled (i.e.
+		 * when the QH is removed from the periodic schedule).
+		 */
+		break;
+	}
+
+	/* Try to queue more transfers now that there's a free channel */
+	tr_type = dwc2_hcd_select_transactions(hcd);
+	if (tr_type != DWC2_TRANSACTION_NONE)
+		dwc2_hcd_queue_transactions(hcd, tr_type);
+}
+
+/*
+ * Halts a host channel. If the channel cannot be halted immediately because
+ * the request queue is full, this function ensures that the FIFO empty
+ * interrupt for the appropriate queue is enabled so that the halt request can
+ * be queued when there is space in the request queue.
+ *
+ * This function may also be called in DMA mode. In that case, the channel is
+ * simply released since the core always halts the channel automatically in
+ * DMA mode.
+ */
+static void halt_channel(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+			 struct dwc2_qtd *qtd,
+			 enum dwc2_halt_status halt_status)
+{
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	if (hcd->dma_enable) {
+		dev_dbg(hcd->dev, "DMA enabled\n");
+		release_channel(hcd, hc, qtd, halt_status);
+		return;
+	}
+
+	/* Slave mode processing */
+	dwc2_hc_halt(hcd, hc, halt_status);
+
+	if (hc->halt_on_queue) {
+		u32 gintmsk;
+
+		dev_dbg(hcd->dev, "Halt on queue\n");
+		if (hc->ep_type == DWC2_EP_TYPE_CONTROL ||
+		    hc->ep_type == DWC2_EP_TYPE_BULK) {
+			dev_dbg(hcd->dev, "control/bulk\n");
+			/*
+			 * Make sure the Non-periodic Tx FIFO empty interrupt
+			 * is enabled so that the non-periodic schedule will
+			 * be processed
+			 */
+			gintmsk = readl(hcd->regs + GINTMSK);
+			gintmsk |= GINTSTS_NPTxFEmp;
+			writel(gintmsk, hcd->regs + GINTMSK);
+		} else {
+			dev_dbg(hcd->dev, "isoc/intr\n");
+			/*
+			 * Move the QH from the periodic queued schedule to
+			 * the periodic assigned schedule. This allows the
+			 * halt to be queued when the periodic schedule is
+			 * processed.
+			 */
+			list_move(&hc->qh->qh_list_entry,
+				  &hcd->periodic_sched_assigned);
+
+			/*
+			 * Make sure the Periodic Tx FIFO Empty interrupt is
+			 * enabled so that the periodic schedule will be
+			 * processed
+			 */
+			gintmsk = readl(hcd->regs + GINTMSK);
+			gintmsk |= GINTSTS_PTxFEmp;
+			writel(gintmsk, hcd->regs + GINTMSK);
+		}
+	}
+}
+
+/*
+ * Performs common cleanup for non-periodic transfers after a Transfer
+ * Complete interrupt. This function should be called after any endpoint type
+ * specific handling is finished to release the host channel.
+ */
+static void complete_non_periodic_xfer(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				       int chnum, struct dwc2_qtd *qtd,
+				       enum dwc2_halt_status halt_status)
+{
+	u32 hcint;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	qtd->error_count = 0;
+	hcint = readl(hcd->regs + HCINT(chnum));
+
+	if (hcint & HCINTMSK_NYET) {
+		/*
+		 * Got a NYET on the last transaction of the transfer. This
+		 * means that the endpoint should be in the PING state at the
+		 * beginning of the next transfer.
+		 */
+		dev_dbg(hcd->dev, "got NYET\n");
+		hc->qh->ping_state = 1;
+		clear_hc_int(hcd, chnum, HCINTMSK_NYET);
+	}
+
+	/*
+	 * Always halt and release the host channel to make it available for
+	 * more transfers. There may still be more phases for a control
+	 * transfer or more data packets for a bulk transfer at this point,
+	 * but the host channel is still halted. A channel will be reassigned
+	 * to the transfer when the non-periodic schedule is processed after
+	 * the channel is released. This allows transactions to be queued
+	 * properly via dwc2_hcd_queue_transactions, which also enables the
+	 * Tx FIFO Empty interrupt if necessary.
+	 */
+	if (hc->ep_is_in) {
+		/*
+		 * IN transfers in Slave mode require an explicit disable to
+		 * halt the channel. (In DMA mode, this call simply releases
+		 * the channel.)
+		 */
+		dev_dbg(hcd->dev, "IN xfer, halting channel\n");
+		halt_channel(hcd, hc, qtd, halt_status);
+	} else {
+		/*
+		 * The channel is automatically disabled by the core for OUT
+		 * transfers in Slave mode
+		 */
+		dev_dbg(hcd->dev, "OUT xfer, releasing channel\n");
+		release_channel(hcd, hc, qtd, halt_status);
+	}
+}
+
+/*
+ * Performs common cleanup for periodic transfers after a Transfer Complete
+ * interrupt. This function should be called after any endpoint type specific
+ * handling is finished to release the host channel.
+ */
+static void complete_periodic_xfer(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				   int chnum, struct dwc2_qtd *qtd,
+				   enum dwc2_halt_status halt_status)
+{
+	u32 hctsiz;
+
+	qtd->error_count = 0;
+	hctsiz = readl(hcd->regs + HCTSIZ(chnum));
+
+	if (!hc->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
+		/* Core halts channel in these cases */
+		release_channel(hcd, hc, qtd, halt_status);
+	else
+		/* Flush any outstanding requests from the Tx queue */
+		halt_channel(hcd, hc, qtd, halt_status);
+}
+
+static int handle_xfercomp_isoc_split_in(struct dwc2_hcd *hcd,
+					 struct dwc2_hc *hc, int chnum,
+					 struct dwc2_qtd *qtd)
+{
+	u32 len;
+	struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
+
+	len = get_actual_xfer_length(hcd, hc, chnum, qtd,
+				     DWC2_HC_XFER_COMPLETE, NULL);
+
+	if (!len) {
+		qtd->complete_split = 0;
+		qtd->isoc_split_offset = 0;
+		return 0;
+	}
+	frame_desc->actual_length += len;
+
+	if (hc->align_buff && len)
+		memcpy(qtd->urb->buf + frame_desc->offset +
+		       qtd->isoc_split_offset, hc->qh->dw_align_buf, len);
+	qtd->isoc_split_offset += len;
+
+	if (frame_desc->length == frame_desc->actual_length) {
+		frame_desc->status = 0;
+		qtd->isoc_frame_index++;
+		qtd->complete_split = 0;
+		qtd->isoc_split_offset = 0;
+	}
+
+	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
+		dwc2_host_complete(hcd, qtd->urb->priv, qtd->urb, 0);
+		release_channel(hcd, hc, qtd, DWC2_HC_XFER_URB_COMPLETE);
+	} else {
+		release_channel(hcd, hc, qtd, DWC2_HC_XFER_NO_HALT_STATUS);
+	}
+
+	return 1;	/* Indicates that channel released */
+}
+
+/*
+ * Handles a host channel Transfer Complete interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_xfercomp_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				   int chnum, struct dwc2_qtd *qtd)
+{
+	struct dwc2_hcd_urb *urb = qtd->urb;
+	int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
+	int urb_xfer_done;
+
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: Transfer Complete--\n",
+		hc->hc_num);
+
+	if (hcd->dma_desc_enable) {
+		dwc2_hcd_complete_xfer_ddma(hcd, hc, chnum, halt_status);
+		if (pipe_type == USB_ENDPOINT_XFER_ISOC) {
+			/* Do not disable the interrupt, just clear it */
+			clear_hc_int(hcd, chnum, HCINTMSK_XFERCOMPL);
+			return 1;
+		}
+		goto handle_xfercomp_done;
+	}
+
+	/* Handle xfer complete on CSPLIT */
+	if (hc->qh->do_split) {
+		if (hc->ep_type == DWC2_EP_TYPE_ISOC && hc->ep_is_in &&
+		    hcd->dma_enable) {
+			if (qtd->complete_split &&
+			    handle_xfercomp_isoc_split_in(hcd, hc, chnum, qtd))
+				goto handle_xfercomp_done;
+		} else {
+			qtd->complete_split = 0;
+		}
+	}
+
+	/* Update the QTD and URB states */
+	switch (pipe_type) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		switch (qtd->control_phase) {
+		case DWC2_CONTROL_SETUP:
+			if (urb->length > 0)
+				qtd->control_phase = DWC2_CONTROL_DATA;
+			else
+				qtd->control_phase = DWC2_CONTROL_STATUS;
+			dev_dbg(hcd->dev, "  Control setup transaction done\n");
+			halt_status = DWC2_HC_XFER_COMPLETE;
+			break;
+		case DWC2_CONTROL_DATA:
+			urb_xfer_done = update_urb_state_xfer_comp(hcd, hc,
+						chnum, urb, qtd);
+			if (urb_xfer_done) {
+				qtd->control_phase = DWC2_CONTROL_STATUS;
+				dev_dbg(hcd->dev,
+					"  Control data transfer done\n");
+			} else {
+				dwc2_hcd_save_data_toggle(hcd, hc, chnum, qtd);
+			}
+			halt_status = DWC2_HC_XFER_COMPLETE;
+			break;
+		case DWC2_CONTROL_STATUS:
+			dev_dbg(hcd->dev, "  Control transfer complete\n");
+			if (urb->status == -EINPROGRESS)
+				urb->status = 0;
+			dwc2_host_complete(hcd, urb->priv, urb, urb->status);
+			halt_status = DWC2_HC_XFER_URB_COMPLETE;
+			break;
+		}
+
+		complete_non_periodic_xfer(hcd, hc, chnum, qtd, halt_status);
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		dev_dbg(hcd->dev, "  Bulk transfer complete\n");
+		urb_xfer_done = update_urb_state_xfer_comp(hcd, hc, chnum,
+							   urb, qtd);
+		if (urb_xfer_done) {
+			dwc2_host_complete(hcd, urb->priv, urb, urb->status);
+			halt_status = DWC2_HC_XFER_URB_COMPLETE;
+		} else {
+			halt_status = DWC2_HC_XFER_COMPLETE;
+		}
+
+		dwc2_hcd_save_data_toggle(hcd, hc, chnum, qtd);
+		complete_non_periodic_xfer(hcd, hc, chnum, qtd, halt_status);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		dev_dbg(hcd->dev, "  Interrupt transfer complete\n");
+		urb_xfer_done = update_urb_state_xfer_comp(hcd, hc, chnum,
+							   urb, qtd);
+
+		/*
+		 * Interrupt URB is done on the first transfer complete
+		 * interrupt
+		 */
+		if (urb_xfer_done) {
+			dwc2_host_complete(hcd, urb->priv, urb, urb->status);
+			halt_status = DWC2_HC_XFER_URB_COMPLETE;
+		} else {
+			halt_status = DWC2_HC_XFER_COMPLETE;
+		}
+
+		dwc2_hcd_save_data_toggle(hcd, hc, chnum, qtd);
+		complete_periodic_xfer(hcd, hc, chnum, qtd, halt_status);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		dev_dbg(hcd->dev, "  Isochronous transfer complete\n");
+		if (qtd->isoc_split_pos == DWC_HCSPLT_XACTPOS_ALL)
+			halt_status = update_isoc_urb_state(hcd, hc, chnum,
+						qtd, DWC2_HC_XFER_COMPLETE);
+		complete_periodic_xfer(hcd, hc, chnum, qtd, halt_status);
+		break;
+	}
+
+handle_xfercomp_done:
+	disable_hc_int(hcd, chnum, HCINTMSK_XFERCOMPL);
+	return 1;
+}
+
+/*
+ * Handles a host channel STALL interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int handle_hc_stall_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				int chnum, struct dwc2_qtd *qtd)
+{
+	struct dwc2_hcd_urb *urb = qtd->urb;
+	int pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: STALL Received--\n",
+		hc->hc_num);
+
+	if (hcd->dma_desc_enable) {
+		dwc2_hcd_complete_xfer_ddma(hcd, hc, chnum,
+					    DWC2_HC_XFER_STALL);
+		goto handle_stall_done;
+	}
+
+	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
+		dwc2_host_complete(hcd, urb->priv, urb, -EPIPE);
+
+	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
+	    pipe_type == USB_ENDPOINT_XFER_INT) {
+		dwc2_host_complete(hcd, urb->priv, urb, -EPIPE);
+		/*
+		 * USB protocol requires resetting the data toggle for bulk
+		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
+		 * setup command is issued to the endpoint. Anticipate the
+		 * CLEAR_FEATURE command since a STALL has occurred and reset
+		 * the data toggle now.
+		 */
+		hc->qh->data_toggle = 0;
+	}
+
+	halt_channel(hcd, hc, qtd, DWC2_HC_XFER_STALL);
+
+handle_stall_done:
+	disable_hc_int(hcd, chnum, HCINTMSK_STALL);
+	return 1;
+}
+
+/*
+ * Updates the state of the URB when a transfer has been stopped due to an
+ * abnormal condition before the transfer completes. Modifies the
+ * actual_length field of the URB to reflect the number of bytes that have
+ * actually been transferred via the host channel.
+ */
+static void update_urb_state_xfer_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				       int chnum, struct dwc2_hcd_urb *urb,
+				       struct dwc2_qtd *qtd,
+				       enum dwc2_halt_status halt_status)
+{
+	u32 bytes_transferred = get_actual_xfer_length(hcd, hc, chnum, qtd,
+						       halt_status, NULL);
+
+	/* Non DWORD-aligned buffer case handling */
+	if (hc->align_buff && bytes_transferred && hc->ep_is_in)
+		memcpy(urb->buf + urb->actual_length, hc->qh->dw_align_buf,
+		       bytes_transferred);
+
+	urb->actual_length += bytes_transferred;
+
+#ifdef DEBUG
+	{
+		u32 hctsiz;
+
+		hctsiz = readl(hcd->regs + HCTSIZ(chnum));
+		dev_dbg(hcd->dev, "DWC_otg: %s: %s, channel %d\n",
+			__func__, (hc->ep_is_in ? "IN" : "OUT"), hc->hc_num);
+		dev_dbg(hcd->dev, "  hc->start_pkt_count %d\n",
+			hc->start_pkt_count);
+		dev_dbg(hcd->dev, "  hctsiz.pktcnt %d\n",
+			hctsiz >> TSIZ_PKTCNT_SHIFT &
+			TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT);
+		dev_dbg(hcd->dev, "  hc->max_packet %d\n", hc->max_packet);
+		dev_dbg(hcd->dev, "  bytes_transferred %d\n",
+			bytes_transferred);
+		dev_dbg(hcd->dev, "  urb->actual_length %d\n",
+			urb->actual_length);
+		dev_dbg(hcd->dev, "  urb->transfer_buffer_length %d\n",
+			urb->length);
+	}
+#endif
+}
+
+/*
+ * Handles a host channel NAK interrupt. This handler may be called in either
+ * DMA mode or Slave mode.
+ */
+static int handle_hc_nak_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+			      int chnum, struct dwc2_qtd *qtd)
+{
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: NAK Received--\n",
+		hc->hc_num);
+
+	/*
+	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
+	 * interrupt. Re-start the SSPLIT transfer.
+	 */
+	if (hc->do_split) {
+		if (hc->complete_split)
+			qtd->error_count = 0;
+		qtd->complete_split = 0;
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_NAK);
+		goto handle_nak_done;
+	}
+
+	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+	case USB_ENDPOINT_XFER_BULK:
+		if (hcd->dma_enable && hc->ep_is_in) {
+			/*
+			 * NAK interrupts are enabled on bulk/control IN
+			 * transfers in DMA mode for the sole purpose of
+			 * resetting the error count after a transaction error
+			 * occurs. The core will continue transferring data.
+			 */
+			qtd->error_count = 0;
+			goto handle_nak_done;
+		}
+
+		/*
+		 * NAK interrupts normally occur during OUT transfers in DMA
+		 * or Slave mode. For IN transfers, more requests will be
+		 * queued as request queue space is available.
+		 */
+		qtd->error_count = 0;
+
+		if (!hc->qh->ping_state) {
+			update_urb_state_xfer_intr(hcd, hc, chnum, qtd->urb,
+						   qtd, DWC2_HC_XFER_NAK);
+			dwc2_hcd_save_data_toggle(hcd, hc, chnum, qtd);
+
+			if (hc->speed == DWC2_EP_SPEED_HIGH)
+				hc->qh->ping_state = 1;
+		}
+
+		/*
+		 * Halt the channel so the transfer can be re-started from
+		 * the appropriate point or the PING protocol will
+		 * start/continue
+		 */
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_NAK);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		qtd->error_count = 0;
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_NAK);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		/* Should never get called for isochronous transfers */
+		dev_err(hcd->dev, "NAK interrupt for ISOC transfer\n");
+		break;
+	}
+
+handle_nak_done:
+	disable_hc_int(hcd, chnum, HCINTMSK_NAK);
+	return 1;
+}
+
+/*
+ * Handles a host channel ACK interrupt. This interrupt is enabled when
+ * performing the PING protocol in Slave mode, when errors occur during
+ * either Slave mode or DMA mode, and during Start Split transactions.
+ */
+static int handle_hc_ack_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+			      int chnum, struct dwc2_qtd *qtd)
+{
+	struct dwc2_hcd_iso_packet_desc *frame_desc;
+
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: ACK Received--\n",
+		hc->hc_num);
+
+	if (hc->do_split) {
+		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
+		if (!hc->ep_is_in && hc->data_pid_start != DWC2_HC_PID_SETUP)
+			qtd->ssplit_out_xfer_count = hc->xfer_len;
+		if (!(hc->ep_type == DWC2_EP_TYPE_ISOC && !hc->ep_is_in))
+			/* Don't need complete for isochronous out transfers */
+			qtd->complete_split = 1;
+
+		/* ISOC OUT */
+		if (hc->ep_type == DWC2_EP_TYPE_ISOC && !hc->ep_is_in) {
+			switch (hc->xact_pos) {
+			case DWC_HCSPLT_XACTPOS_ALL:
+				break;
+			case DWC_HCSPLT_XACTPOS_END:
+				qtd->isoc_split_pos = DWC_HCSPLT_XACTPOS_ALL;
+				qtd->isoc_split_offset = 0;
+				break;
+			case DWC_HCSPLT_XACTPOS_BEGIN:
+			case DWC_HCSPLT_XACTPOS_MID:
+				/*
+				 * For BEGIN or MID, calculate the length for
+				 * the next microframe to determine the correct
+				 * SSPLIT token, either MID or END
+				 */
+				frame_desc = &qtd->urb->iso_descs[
+						qtd->isoc_frame_index];
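+				/*
+				 * 188 bytes is the most a single full-speed
+				 * split transaction can move across the TT
+				 * in one microframe
+				 */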
+				qtd->isoc_split_offset += 188;
+
+				if (frame_desc->length -
+						qtd->isoc_split_offset <= 188)
+					qtd->isoc_split_pos =
+							DWC_HCSPLT_XACTPOS_END;
+				else
+					qtd->isoc_split_pos =
+							DWC_HCSPLT_XACTPOS_MID;
+				break;
+			}
+		} else {
+			halt_channel(hcd, hc, qtd, DWC2_HC_XFER_ACK);
+		}
+	} else {
+		qtd->error_count = 0;
+
+		if (hc->qh->ping_state) {
+			hc->qh->ping_state = 0;
+			/*
+			 * Halt the channel so the transfer can be re-started
+			 * from the appropriate point. This only happens in
+			 * Slave mode. In DMA mode, the ping_state is cleared
+			 * when the transfer is started because the core
+			 * automatically executes the PING, then the transfer.
+			 */
+			halt_channel(hcd, hc, qtd, DWC2_HC_XFER_ACK);
+		}
+	}
+
+	/*
+	 * If the ACK occurred when _not_ in the PING state, let the channel
+	 * continue transferring data after clearing the error count
+	 */
+	disable_hc_int(hcd, chnum, HCINTMSK_ACK);
+	return 1;
+}
+
+/*
+ * Handles a host channel NYET interrupt. This interrupt should only occur on
+ * Bulk and Control OUT endpoints and for complete split transactions. If a
+ * NYET occurs at the same time as a Transfer Complete interrupt, it is
+ * handled in the xfercomp interrupt handler, not here. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_nyet_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+			       int chnum, struct dwc2_qtd *qtd)
+{
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: NYET Received--\n",
+		hc->hc_num);
+
+	/*
+	 * NYET on CSPLIT
+	 * re-do the CSPLIT immediately on non-periodic
+	 */
+	if (hc->do_split && hc->complete_split) {
+		if (hc->ep_is_in && hc->ep_type == DWC2_EP_TYPE_ISOC &&
+		    hcd->dma_enable) {
+			qtd->complete_split = 0;
+			qtd->isoc_split_offset = 0;
+			if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
+				dwc2_host_complete(hcd, qtd->urb->priv,
+						   qtd->urb, 0);
+				release_channel(hcd, hc, qtd,
+						DWC2_HC_XFER_URB_COMPLETE);
+			} else {
+				release_channel(hcd, hc, qtd,
+						DWC2_HC_XFER_NO_HALT_STATUS);
+			}
+			goto handle_nyet_done;
+		}
+
+		if (hc->ep_type == DWC2_EP_TYPE_INTR ||
+		    hc->ep_type == DWC2_EP_TYPE_ISOC) {
+			int frnum = dwc2_hcd_get_frame_number(hcd);
+
+			if (dwc2_full_frame_num(frnum) !=
+			    dwc2_full_frame_num(hc->qh->sched_frame)) {
+				/*
+				 * No longer in the same full speed frame.
+				 * Treat this as a transaction error.
+				 */
+#if 0
+				/*
+				 * Todo: Fix system performance so this can
+				 * be treated as an error. Right now complete
+				 * splits cannot be scheduled precisely enough
+				 * due to other system activity, so this error
+				 * occurs regularly in Slave mode.
+				 */
+				qtd->error_count++;
+#endif
+				qtd->complete_split = 0;
+				halt_channel(hcd, hc, qtd,
+					     DWC2_HC_XFER_XACT_ERR);
+				/* Todo: add support for isoc release */
+				goto handle_nyet_done;
+			}
+		}
+
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_NYET);
+		goto handle_nyet_done;
+	}
+
+	hc->qh->ping_state = 1;
+	qtd->error_count = 0;
+
+	update_urb_state_xfer_intr(hcd, hc, chnum, qtd->urb, qtd,
+				   DWC2_HC_XFER_NYET);
+	dwc2_hcd_save_data_toggle(hcd, hc, chnum, qtd);
+
+	/*
+	 * Halt the channel and re-start the transfer so the PING protocol
+	 * will start
+	 */
+	halt_channel(hcd, hc, qtd, DWC2_HC_XFER_NYET);
+
+handle_nyet_done:
+	disable_hc_int(hcd, chnum, HCINTMSK_NYET);
+	return 1;
+}
+
+/*
+ * Handles a host channel babble interrupt. This handler may be called in
+ * either DMA mode or Slave mode.
+ */
+static int handle_hc_babble_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				 int chnum, struct dwc2_qtd *qtd)
+{
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: Babble Error--\n",
+		hc->hc_num);
+
+	if (hcd->dma_desc_enable) {
+		dwc2_hcd_complete_xfer_ddma(hcd, hc, chnum,
+					    DWC2_HC_XFER_BABBLE_ERR);
+		goto handle_babble_done;
+	}
+
+	if (hc->ep_type != DWC2_EP_TYPE_ISOC) {
+		dwc2_host_complete(hcd, qtd->urb->priv, qtd->urb, -EOVERFLOW);
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_BABBLE_ERR);
+	} else {
+		enum dwc2_halt_status halt_status;
+
+		halt_status = update_isoc_urb_state(hcd, hc, chnum, qtd,
+						    DWC2_HC_XFER_BABBLE_ERR);
+		halt_channel(hcd, hc, qtd, halt_status);
+	}
+
+handle_babble_done:
+	disable_hc_int(hcd, chnum, HCINTMSK_BBLERR);
+	return 1;
+}
+
+/*
+ * Handles a host channel AHB error interrupt. This handler is only called in
+ * DMA mode.
+ */
+static int handle_hc_ahberr_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				 int chnum, struct dwc2_qtd *qtd)
+{
+	struct dwc2_hcd_urb *urb = qtd->urb;
+	char *pipetype, *speed;
+	u32 hcchar;
+	u32 hcsplt;
+	u32 hctsiz;
+	u32 hcdma;
+
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: AHB Error--\n",
+		hc->hc_num);
+
+	hcchar = readl(hcd->regs + HCCHAR(chnum));
+	hcsplt = readl(hcd->regs + HCSPLT(chnum));
+	hctsiz = readl(hcd->regs + HCTSIZ(chnum));
+	hcdma = readl(hcd->regs + HCDMA(chnum));
+
+	dev_err(hcd->dev, "AHB ERROR, Channel %d\n", hc->hc_num);
+	dev_err(hcd->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
+	dev_err(hcd->dev, "  hctsiz 0x%08x, hcdma 0x%08x\n", hctsiz, hcdma);
+	dev_err(hcd->dev, "  Device address: %d\n",
+		dwc2_hcd_get_dev_addr(&urb->pipe_info));
+	dev_err(hcd->dev, "  Endpoint: %d, %s\n",
+		dwc2_hcd_get_ep_num(&urb->pipe_info),
+		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
+
+	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+		pipetype = "CONTROL";
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		pipetype = "BULK";
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		pipetype = "INTERRUPT";
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		pipetype = "ISOCHRONOUS";
+		break;
+	default:
+		pipetype = "UNKNOWN";
+		break;
+	}
+
+	dev_err(hcd->dev, "  Endpoint type: %s\n", pipetype);
+
+	switch (hc->speed) {
+	case DWC2_EP_SPEED_HIGH:
+		speed = "HIGH";
+		break;
+	case DWC2_EP_SPEED_FULL:
+		speed = "FULL";
+		break;
+	case DWC2_EP_SPEED_LOW:
+		speed = "LOW";
+		break;
+	default:
+		speed = "UNKNOWN";
+		break;
+	}
+
+	dev_err(hcd->dev, "  Speed: %s\n", speed);
+
+	dev_err(hcd->dev, "  Max packet size: %d\n",
+		dwc2_hcd_get_mps(&urb->pipe_info));
+	dev_err(hcd->dev, "  Data buffer length: %d\n", urb->length);
+	dev_err(hcd->dev, "  Transfer buffer: %p, Transfer DMA: %p\n",
+		urb->buf, (void *)urb->dma);
+	dev_err(hcd->dev, "  Setup buffer: %p, Setup DMA: %p\n",
+		urb->setup_packet, (void *)urb->setup_dma);
+	dev_err(hcd->dev, "  Interval: %d\n", urb->interval);
+
+	/* Core halts the channel for Descriptor DMA mode */
+	if (hcd->dma_desc_enable) {
+		dwc2_hcd_complete_xfer_ddma(hcd, hc, chnum,
+					    DWC2_HC_XFER_AHB_ERR);
+		goto handle_ahberr_done;
+	}
+
+	dwc2_host_complete(hcd, urb->priv, urb, -EIO);
+
+	/*
+	 * Force a channel halt. Don't call halt_channel because that won't
+	 * write to the HCCHARn register in DMA mode to force the halt.
+	 */
+	dwc2_hc_halt(hcd, hc, DWC2_HC_XFER_AHB_ERR);
+
+handle_ahberr_done:
+	disable_hc_int(hcd, chnum, HCINTMSK_AHBERR);
+	return 1;
+}
+
+/*
+ * Handles a host channel transaction error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_xacterr_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				  int chnum, struct dwc2_qtd *qtd)
+{
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: Transaction Error--\n",
+		hc->hc_num);
+
+	if (hcd->dma_desc_enable) {
+		dwc2_hcd_complete_xfer_ddma(hcd, hc, chnum,
+					    DWC2_HC_XFER_XACT_ERR);
+		goto handle_xacterr_done;
+	}
+
+	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+	case USB_ENDPOINT_XFER_BULK:
+		qtd->error_count++;
+		if (!hc->qh->ping_state) {
+			update_urb_state_xfer_intr(hcd, hc, chnum, qtd->urb,
+						   qtd, DWC2_HC_XFER_XACT_ERR);
+			dwc2_hcd_save_data_toggle(hcd, hc, chnum, qtd);
+			if (!hc->ep_is_in && hc->speed == DWC2_EP_SPEED_HIGH)
+				hc->qh->ping_state = 1;
+		}
+
+		/*
+		 * Halt the channel so the transfer can be re-started from
+		 * the appropriate point or the PING protocol will start
+		 */
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_XACT_ERR);
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		qtd->error_count++;
+		if (hc->do_split && hc->complete_split)
+			qtd->complete_split = 0;
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_XACT_ERR);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		{
+			enum dwc2_halt_status halt_status;
+
+			halt_status = update_isoc_urb_state(hcd, hc, chnum,
+					qtd, DWC2_HC_XFER_XACT_ERR);
+			halt_channel(hcd, hc, qtd, halt_status);
+		}
+		break;
+	}
+
+handle_xacterr_done:
+	disable_hc_int(hcd, chnum, HCINTMSK_XACTERR);
+	return 1;
+}
+
+/*
+ * Handles a host channel frame overrun interrupt. This handler may be called
+ * in either DMA mode or Slave mode.
+ */
+static int handle_hc_frmovrun_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				   int chnum, struct dwc2_qtd *qtd)
+{
+	enum dwc2_halt_status halt_status;
+
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
+		hc->hc_num);
+
+	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
+	case USB_ENDPOINT_XFER_CONTROL:
+	case USB_ENDPOINT_XFER_BULK:
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		halt_channel(hcd, hc, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
+		break;
+	case USB_ENDPOINT_XFER_ISOC:
+		halt_status = update_isoc_urb_state(hcd, hc, chnum, qtd,
+					DWC2_HC_XFER_FRAME_OVERRUN);
+
+		halt_channel(hcd, hc, qtd, halt_status);
+		break;
+	}
+
+	disable_hc_int(hcd, chnum, HCINTMSK_FRMOVRUN);
+	return 1;
+}
+
+/*
+ * Handles a host channel data toggle error interrupt. This handler may be
+ * called in either DMA mode or Slave mode.
+ */
+static int handle_hc_datatglerr_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				     int chnum, struct dwc2_qtd *qtd)
+{
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: Data Toggle Error--\n",
+		hc->hc_num);
+
+	if (hc->ep_is_in)
+		qtd->error_count = 0;
+	else
+		dev_err(hcd->dev,
+			"Data Toggle Error on OUT transfer, channel %d\n",
+			hc->hc_num);
+
+	disable_hc_int(hcd, chnum, HCINTMSK_DATATGLERR);
+	return 1;
+}
+
+#ifdef DEBUG
+/*
+ * For debug only. It checks that a valid halt status is set and that
+ * HCCHARn.chdis is clear. If there's a problem, corrective action is
+ * taken and a warning is issued.
+ *
+ * Return: 1 if halt status is ok, 0 otherwise
+ */
+static int halt_status_ok(struct dwc2_hcd *hcd, struct dwc2_hc *hc, int chnum,
+			  struct dwc2_qtd *qtd)
+{
+	u32 hcchar;
+	u32 hctsiz;
+	u32 hcint;
+	u32 hcintmsk;
+	u32 hcsplt;
+
+	if (hc->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
+		/*
+		 * This code is here only as a check. This condition should
+		 * never happen. Ignore the halt if it does occur.
+		 */
+		hcchar = readl(hcd->regs + HCCHAR(chnum));
+		hctsiz = readl(hcd->regs + HCTSIZ(chnum));
+		hcint = readl(hcd->regs + HCINT(chnum));
+		hcintmsk = readl(hcd->regs + HCINTMSK(chnum));
+		hcsplt = readl(hcd->regs + HCSPLT(chnum));
+		dev_warn(hcd->dev,
+			 "%s: hc->halt_status=DWC2_HC_XFER_NO_HALT_STATUS, "
+			 "channel %d, hcchar 0x%08x, hctsiz 0x%08x, "
+			 "hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x, "
+			 "qtd->complete_split %d\n", __func__, hc->hc_num,
+			 hcchar, hctsiz, hcint, hcintmsk, hcsplt,
+			 qtd->complete_split);
+
+		dev_warn(hcd->dev,
+			 "%s: no halt status, channel %d, ignoring interrupt\n",
+			 __func__, hc->hc_num);
+		dev_warn(hcd->dev, "\n");
+		clear_hc_int(hcd, chnum, HCINTMSK_CHHLTD);
+		return 0;
+	}
+
+	/*
+	 * This code is here only as a check. hcchar.chdis should never be set
+	 * when the halt interrupt occurs. Halt the channel again if it does
+	 * occur.
+	 */
+	hcchar = readl(hcd->regs + HCCHAR(chnum));
+	if (hcchar & HCCHAR_CHDIS) {
+		dev_warn(hcd->dev,
+			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
+			 __func__, hcchar);
+		clear_hc_int(hcd, chnum, HCINTMSK_CHHLTD);
+		hc->halt_pending = 0;
+		halt_channel(hcd, hc, qtd, hc->halt_status);
+		return 0;
+	}
+
+	return 1;
+}
+#endif
+
+/*
+ * Handles a host channel Channel Halted interrupt in DMA mode. This handler
+ * determines the reason the channel halted and proceeds accordingly.
+ */
+static void handle_hc_chhltd_intr_dma(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				      int chnum, struct dwc2_qtd *qtd)
+{
+	u32 hcint;
+	u32 hcintmsk;
+	int out_nak_enh = 0;
+
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: DMA Channel Halted--\n",
+		hc->hc_num);
+
+	/*
+	 * For cores with the OUT NAK enhancement, the flow for high-speed
+	 * CONTROL/BULK OUT is handled a little differently
+	 */
+	if (hcd->snpsid >= DWC2_CORE_REV_2_71a) {
+		if (hc->speed == DWC2_EP_SPEED_HIGH && !hc->ep_is_in &&
+		    (hc->ep_type == DWC2_EP_TYPE_CONTROL ||
+		     hc->ep_type == DWC2_EP_TYPE_BULK)) {
+			out_nak_enh = 1;
+		}
+	}
+
+	if (hc->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
+	    (hc->halt_status == DWC2_HC_XFER_AHB_ERR &&
+	     !hcd->dma_desc_enable)) {
+		/*
+		 * Just release the channel. A dequeue can happen on a
+		 * transfer timeout. In the case of an AHB Error, the channel
+		 * was forced to halt because there's no way to gracefully
+		 * recover.
+		 */
+		if (hcd->dma_desc_enable)
+			dwc2_hcd_complete_xfer_ddma(hcd, hc, chnum,
+						    hc->halt_status);
+		else
+			release_channel(hcd, hc, qtd, hc->halt_status);
+		return;
+	}
+
+	/* Read the HCINTn register to determine the cause for the halt */
+	hcint = readl(hcd->regs + HCINT(chnum));
+	hcintmsk = readl(hcd->regs + HCINTMSK(chnum));
+
+	if (hcint & HCINTMSK_XFERCOMPL) {
+		/*
+		 * Todo: This is here because of a possible hardware bug. The
+		 * spec says that on SPLIT-ISOC OUT transfers in DMA mode, a
+		 * HALT interrupt with the ACK bit set should occur, but only
+		 * the XFERCOMP bit is seen, even with ACK masked out. This is
+		 * a workaround for that behavior. It should be removed once
+		 * the hardware is fixed.
+		 */
+		if (hc->ep_type == DWC2_EP_TYPE_ISOC && !hc->ep_is_in)
+			handle_hc_ack_intr(hcd, hc, chnum, qtd);
+		handle_hc_xfercomp_intr(hcd, hc, chnum, qtd);
+	} else if (hcint & HCINTMSK_STALL) {
+		handle_hc_stall_intr(hcd, hc, chnum, qtd);
+	} else if ((hcint & HCINTMSK_XACTERR) && !hcd->dma_desc_enable) {
+		if (out_nak_enh) {
+			if (hcint &
+			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
+				dev_dbg(hcd->dev,
+					"XactErr with NYET/NAK/ACK\n");
+				qtd->error_count = 0;
+			} else {
+				dev_dbg(hcd->dev,
+					"XactErr without NYET/NAK/ACK\n");
+			}
+		}
+
+		/*
+		 * Must handle xacterr before nak or ack. Could get a xacterr
+		 * at the same time as either of these on a BULK/CONTROL OUT
+		 * that started with a PING. The xacterr takes precedence.
+		 */
+		handle_hc_xacterr_intr(hcd, hc, chnum, qtd);
+	} else if ((hcint & HCINTMSK_XCS_XACT) && hcd->dma_desc_enable) {
+		handle_hc_xacterr_intr(hcd, hc, chnum, qtd);
+	} else if ((hcint & HCINTMSK_AHBERR) && hcd->dma_desc_enable) {
+		handle_hc_ahberr_intr(hcd, hc, chnum, qtd);
+	} else if (hcint & HCINTMSK_BBLERR) {
+		handle_hc_babble_intr(hcd, hc, chnum, qtd);
+	} else if (hcint & HCINTMSK_FRMOVRUN) {
+		handle_hc_frmovrun_intr(hcd, hc, chnum, qtd);
+	} else if (!out_nak_enh) {
+		if (hcint & HCINTMSK_NYET) {
+			/*
+			 * Must handle nyet before nak or ack. Could get a nyet
+			 * at the same time as either of those on a BULK/CONTROL
+			 * OUT that started with a PING. The nyet takes
+			 * precedence.
+			 */
+			handle_hc_nyet_intr(hcd, hc, chnum, qtd);
+		} else if ((hcint & HCINTMSK_NAK) &&
+			   !(hcintmsk & HCINTMSK_NAK)) {
+			/*
+			 * If nak is not masked, it's because a non-split IN
+			 * transfer is in an error state. In that case, the nak
+			 * is handled by the nak interrupt handler, not here.
+			 * Handle nak here for BULK/CONTROL OUT transfers, which
+			 * halt on a NAK to allow rewinding the buffer pointer.
+			 */
+			handle_hc_nak_intr(hcd, hc, chnum, qtd);
+		} else if ((hcint & HCINTMSK_ACK) &&
+			   !(hcintmsk & HCINTMSK_ACK)) {
+			/*
+			 * If ack is not masked, it's because a non-split IN
+			 * transfer is in an error state. In that case, the ack
+			 * is handled by the ack interrupt handler, not here.
+			 * Handle ack here for split transfers. Start splits
+			 * halt on ACK.
+			 */
+			handle_hc_ack_intr(hcd, hc, chnum, qtd);
+		} else {
+			if (hc->ep_type == DWC2_EP_TYPE_INTR ||
+			    hc->ep_type == DWC2_EP_TYPE_ISOC) {
+				/*
+				 * A periodic transfer halted with no other
+				 * channel interrupts set. Assume it was halted
+				 * by the core because it could not be completed
+				 * in its scheduled (micro)frame.
+				 */
+#ifdef DEBUG
+				dev_info(hcd->dev,
+					 "%s: Halt channel %d (assume "
+					 "incomplete periodic transfer)\n",
+					 __func__, hc->hc_num);
+#endif
+				halt_channel(hcd, hc, qtd,
+					     DWC2_HC_XFER_PERIODIC_INCOMPLETE);
+			} else {
+				dev_err(hcd->dev,
+					"%s: Channel %d, DMA Mode -- ChHltd "
+					"set, but reason for halting is "
+					"unknown, hcint 0x%08x, intsts "
+					"0x%08x\n", __func__, hc->hc_num, hcint,
+					readl(hcd->regs + GINTSTS));
+			}
+		}
+	} else {
+		dev_info(hcd->dev,
+			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
+			 hcint);
+	}
+}
+
+/*
+ * Handles a host channel Channel Halted interrupt
+ *
+ * In slave mode, this handler is called only when the driver specifically
+ * requests a halt. This occurs while handling other host channel interrupts
+ * (e.g. nak, xacterr, stall, nyet).
+ *
+ * In DMA mode, this is the interrupt that occurs when the core has finished
+ * processing a transfer on a channel. Other host channel interrupts (except
+ * ahberr) are disabled in DMA mode.
+ */
+static int handle_hc_chhltd_intr(struct dwc2_hcd *hcd, struct dwc2_hc *hc,
+				 int chnum, struct dwc2_qtd *qtd)
+{
+	dev_dbg(hcd->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
+		hc->hc_num);
+
+	if (hcd->dma_enable) {
+		handle_hc_chhltd_intr_dma(hcd, hc, chnum, qtd);
+	} else {
+#ifdef DEBUG
+		if (!halt_status_ok(hcd, hc, chnum, qtd))
+			return 1;
+#endif
+		release_channel(hcd, hc, qtd, hc->halt_status);
+	}
+
+	return 1;
+}
+
+/* Handles interrupt for a specific Host Channel */
+static int handle_hc_n_intr(struct dwc2_hcd *hcd, u32 chnum)
+{
+	int retval = 0;
+	u32 hcint;
+	u32 hcintmsk;
+	struct dwc2_hc *hc;
+	struct dwc2_qtd *qtd;
+
+	dev_dbg(hcd->dev, "--Host Channel Interrupt--, Channel %d\n", chnum);
+
+	hcint = readl(hcd->regs + HCINT(chnum));
+	hcintmsk = readl(hcd->regs + HCINTMSK(chnum));
+	dev_dbg(hcd->dev,
+		"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
+		hcint, hcintmsk, (hcint & hcintmsk));
+	hcint = hcint & hcintmsk;
+
+	hc = hcd->hc_ptr_array[chnum];
+	if (!hc) {
+		dev_err(hcd->dev, "## hc_ptr_array for channel is NULL ##\n");
+		return 1;
+	}
+	if (list_empty(&hc->qh->qtd_list)) {
+		dev_err(hcd->dev, "## no QTD queued for channel ##\n");
+		return 1;
+	}
+
+	qtd = list_first_entry(&hc->qh->qtd_list, struct dwc2_qtd,
+			       qtd_list_entry);
+
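+	/*
+	 * In slave mode, a Channel Halted interrupt that arrives together
+	 * with other status bits is a halt the driver itself requested
+	 * while servicing those conditions. Drop the halt bit here so the
+	 * substantive handlers below run first; the Channel Halted case is
+	 * processed when it is the only status bit set.
+	 */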
+	if (!hcd->dma_enable) {
+		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
+			hcint &= ~HCINTMSK_CHHLTD;
+	}
+
+	if (hcint & HCINTMSK_XFERCOMPL) {
+		retval |= handle_hc_xfercomp_intr(hcd, hc, chnum, qtd);
+		/*
+		 * If NYET occurred at same time as Xfer Complete, the NYET is
+		 * handled by the Xfer Complete interrupt handler. Don't want
+		 * to call the NYET interrupt handler in this case.
+		 */
+		hcint &= ~HCINTMSK_NYET;
+	}
+	if (hcint & HCINTMSK_CHHLTD)
+		retval |= handle_hc_chhltd_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_AHBERR)
+		retval |= handle_hc_ahberr_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_STALL)
+		retval |= handle_hc_stall_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_NAK)
+		retval |= handle_hc_nak_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_ACK)
+		retval |= handle_hc_ack_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_NYET)
+		retval |= handle_hc_nyet_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_XACTERR)
+		retval |= handle_hc_xacterr_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_BBLERR)
+		retval |= handle_hc_babble_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_FRMOVRUN)
+		retval |= handle_hc_frmovrun_intr(hcd, hc, chnum, qtd);
+	if (hcint & HCINTMSK_DATATGLERR)
+		retval |= handle_hc_datatglerr_intr(hcd, hc, chnum, qtd);
+
+	return retval;
+}
+
+/*
+ * This interrupt indicates that one or more host channels have a pending
+ * interrupt. There are multiple conditions that can cause each host channel
+ * interrupt. This function determines which conditions have occurred for each
+ * host channel interrupt and handles them appropriately.
+ */
+static int handle_hc_intr(struct dwc2_hcd *hcd)
+{
+	u32 haint;
+	int i;
+	int retval = 0;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	/*
+	 * Clear appropriate bits in HCINTn to clear the interrupt bit in
+	 * GINTSTS
+	 */
+	haint = readl(hcd->regs + HAINT);
+	dev_dbg(hcd->dev, "HAINT=%08x\n", haint);
+
+	for (i = 0; i < hcd->core_params->host_channels; i++) {
+		if (haint & (1 << i))
+			retval |= handle_hc_n_intr(hcd, i);
+	}
+
+	return retval;
+}
+
+/* This function handles interrupts for the HCD */
+int dwc2_hcd_handle_intr(struct dwc2_hcd *hcd)
+{
+	u32 gintsts;
+	int retval = 0;
+
+	if (dwc2_check_core_status(hcd) < 0) {
+		dev_warn(hcd->dev, "Controller is disconnected\n");
+		return retval;
+	}
+
+	spin_lock(&hcd->lock);
+
+	/* Check if HOST Mode */
+	if (dwc2_is_host_mode(hcd)) {
+		gintsts = dwc2_read_core_intr(hcd);
+		if (!gintsts) {
+			spin_unlock(&hcd->lock);
+			return 0;
+		}
+
+#ifdef DEBUG
+#ifndef DEBUG_SOF
+		/* Don't print debug messages in the interrupt handler on SOF */
+		if (gintsts != GINTSTS_SOF) {
+#endif
+			dev_dbg(hcd->dev, "\n");
+			dev_dbg(hcd->dev,
+				"DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
+				gintsts);
+#ifndef DEBUG_SOF
+		}
+#endif
+#endif
+
+		if (gintsts & GINTSTS_SOF)
+			retval |= handle_sof_intr(hcd);
+		if (gintsts & GINTSTS_RxFLvl)
+			retval |= handle_rx_status_q_level_intr(hcd);
+		if (gintsts & GINTSTS_NPTxFEmp)
+			retval |= handle_np_tx_fifo_empty_intr(hcd);
+		if (gintsts & GINTSTS_I2CInt)
+			/* Todo: Implement i2cintr handler */
+			retval |= 1;
+		if (gintsts & GINTSTS_PrtInt)
+			retval |= handle_port_intr(hcd);
+		if (gintsts & GINTSTS_HChInt)
+			retval |= handle_hc_intr(hcd);
+		if (gintsts & GINTSTS_PTxFEmp)
+			retval |= handle_perio_tx_fifo_empty_intr(hcd);
+
+#ifdef DEBUG
+#ifndef DEBUG_SOF
+		if (gintsts != GINTSTS_SOF) {
+#endif
+			dev_dbg(hcd->dev,
+				"DWC OTG HCD Finished Servicing Interrupts\n");
+			dev_dbg(hcd->dev, "DWC OTG HCD gintsts=0x%08x\n",
+				readl(hcd->regs + GINTSTS));
+			dev_dbg(hcd->dev, "DWC OTG HCD gintmsk=0x%08x\n",
+				readl(hcd->regs + GINTMSK));
+			dev_dbg(hcd->dev, "\n");
+#ifndef DEBUG_SOF
+		}
+#endif
+#endif
+	}
+
+	spin_unlock(&hcd->lock);
+
+	return retval;
+}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
new file mode 100644
index 0000000..00201f4
--- /dev/null
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -0,0 +1,749 @@
+/*
+ * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
+ *
+ * Copyright (C) 2004-2012 Synopsys, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ *    to endorse or promote products derived from this software without
+ *    specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation; either version 2 of the License, or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * This file contains the functions to manage Queue Heads and Queue
+ * Transfer Descriptors for Host mode
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <linux/usb/hcd.h>
+#include <linux/usb/ch11.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/ch9.h>
+
+#include "core.h"
+#include "hcd.h"
+
+/**
+ * dwc2_hcd_qh_free() - Frees each QTD in the QH's QTD-list then frees the QH
+ *
+ * @hcd: HCD instance
+ * @qh:  The QH to free
+ *
+ * QH should already be removed from the list. QTD list should already be empty
+ * if called from URB Dequeue.
+ */
+void dwc2_hcd_qh_free(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	struct list_head *qtd_item, *qtd_tmp;
+	struct dwc2_qtd *qtd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hcd->lock, flags);
+
+	/* Free each QTD in the QTD list */
+	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
+		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
+		list_del_init(&qtd->qtd_list_entry);
+		kfree(qtd);
+	}
+
+	spin_unlock_irqrestore(&hcd->lock, flags);
+
+	if (hcd->dma_desc_enable) {
+		dwc2_hcd_qh_free_ddma(hcd, qh);
+	} else if (qh->dw_align_buf) {
+		u32 buf_size;
+
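+		/*
+		 * Must match the size used when the bounce buffer was
+		 * allocated: a fixed 4 KiB for isochronous endpoints
+		 * (comfortably above the 3 * 1024 byte high-bandwidth
+		 * maximum per microframe), otherwise the channel's max
+		 * transfer size
+		 */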
+		if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
+			buf_size = 4096;
+		else
+			buf_size = hcd->core_params->max_transfer_size;
+		dma_free_coherent(hcd->dev, buf_size, qh->dw_align_buf,
+				  qh->dw_align_buf_dma);
+	}
+
+	kfree(qh);
+}
+
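+/*
+ * BITSTUFFTIME() below gives the worst-case number of bit times needed
+ * to transmit a payload: NRZI bit stuffing can insert one extra bit for
+ * every six data bits, so the 8 * bytecount data bits are scaled by 7/6
+ */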
+#define BITSTUFFTIME(bytecount)	((8 * 7 * (bytecount)) / 6)
+#define HS_HOST_DELAY		5	/* nanoseconds */
+#define FS_LS_HOST_DELAY	1000	/* nanoseconds */
+#define HUB_LS_SETUP		333	/* nanoseconds */
+
+static u32 calc_bus_time(struct dwc2_hcd *hcd, int speed, int is_in,
+			 int is_isoc, int bytecount)
+{
+	unsigned long retval;
+
+	switch (speed) {
+	case USB_SPEED_HIGH:
+		if (is_isoc)
+			retval =
+			    ((38 * 8 * 2083) +
+			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
+			    HS_HOST_DELAY;
+		else
+			retval =
+			    ((55 * 8 * 2083) +
+			     (2083 * (3 + BITSTUFFTIME(bytecount)))) / 1000 +
+			    HS_HOST_DELAY;
+		break;
+	case USB_SPEED_FULL:
+		if (is_isoc) {
+			retval =
+			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
+			if (is_in)
+				retval = 7268 + FS_LS_HOST_DELAY + retval;
+			else
+				retval = 6265 + FS_LS_HOST_DELAY + retval;
+		} else {
+			retval =
+			    (8354 * (31 + 10 * BITSTUFFTIME(bytecount))) / 1000;
+			retval = 9107 + FS_LS_HOST_DELAY + retval;
+		}
+		break;
+	case USB_SPEED_LOW:
+		if (is_in) {
+			retval =
+			    (67667 * (31 + 10 * BITSTUFFTIME(bytecount))) /
+			    1000;
+			retval =
+			    64060 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
+			    retval;
+		} else {
+			retval =
+			    (66700 * (31 + 10 * BITSTUFFTIME(bytecount))) /
+			    1000;
+			retval =
+			    64107 + (2 * HUB_LS_SETUP) + FS_LS_HOST_DELAY +
+			    retval;
+		}
+		break;
+	default:
+		dev_warn(hcd->dev, "Unknown device speed\n");
+		retval = -1;
+	}
+
+	return NS_TO_US(retval);
+}
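+
+/*
+ * Worked example: a full-speed, non-isochronous, 64-byte transaction
+ * gives BITSTUFFTIME(64) = (8 * 7 * 64) / 6 = 597 bit times, so
+ * (8354 * (31 + 10 * 597)) / 1000 = 50132, and the total is
+ * 9107 + FS_LS_HOST_DELAY + 50132 = 60239 ns, or roughly 60 us once
+ * converted by NS_TO_US()
+ */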
+
+#define SCHEDULE_SLOP 10
+
+/**
+ * qh_init() - Initializes a QH structure
+ *
+ * @hcd: The HCD state structure for the DWC OTG controller
+ * @qh:  The QH to init
+ * @urb: Holds the information about the device/endpoint needed to initialize
+ *       the QH
+ */
+static void qh_init(struct dwc2_hcd *hcd, struct dwc2_qh *qh,
+		    struct dwc2_hcd_urb *urb)
+{
+	char *speed, *type;
+	int dev_speed;
+	u32 hub_addr, hub_port;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	/* Initialize QH */
+	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
+	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;
+
+	qh->data_toggle = DWC2_HC_PID_DATA0;
+	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
+	INIT_LIST_HEAD(&qh->qtd_list);
+	INIT_LIST_HEAD(&qh->qh_list_entry);
+	qh->channel = NULL;
+
+	dev_speed = dwc2_host_speed(hcd, urb->priv);
+
+	dwc2_host_hub_info(hcd, urb->priv, &hub_addr, &hub_port);
+	qh->do_split = 0;
+
+	/* FS/LS endpoint on a HS hub, NOT the virtual root hub */
+	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
+	    hub_addr != 0 && hub_addr != 1) {
+		dev_dbg(hcd->dev,
+			"QH init: EP %d: TT found at hub addr %d, for port %d\n",
+			dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
+			hub_port);
+		qh->do_split = 1;
+	}
+
+	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
+	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
+		/* Compute scheduling parameters once and save them */
+		u32 hprt, prtspd;
+
+		/* Todo: Account for split transfers in the bus time */
+		int bytecount =
+			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);
+
+		qh->usecs = calc_bus_time(hcd, qh->do_split ? USB_SPEED_HIGH :
+				dev_speed, qh->ep_is_in,
+				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
+				bytecount);
+		/* Start in a (micro)frame slightly in the future */
+		qh->sched_frame = dwc2_frame_num_inc(hcd->frame_number,
+						     SCHEDULE_SLOP);
+		qh->interval = urb->interval;
+#if 0
+		/* Increase interrupt polling rate for debugging */
+		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
+			qh->interval = 8;
+#endif
+		hprt = readl(hcd->regs + HPRT0);
+		prtspd = hprt & HPRT0_SPD_MASK;
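+		/*
+		 * When the root port runs at high speed, the frame number
+		 * advances once per 125 us microframe (8 per 1 ms frame),
+		 * so a FS/LS interval expressed in frames is converted to
+		 * microframes here. Forcing the low three (microframe)
+		 * bits of the schedule to 7 lines the first start split
+		 * up with a frame boundary.
+		 */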
+		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
+		    (dev_speed == USB_SPEED_LOW ||
+		     dev_speed == USB_SPEED_FULL)) {
+			qh->interval *= 8;
+			qh->sched_frame |= 0x7;
+			qh->start_split_frame = qh->sched_frame;
+		}
+	}
+
+	dev_dbg(hcd->dev, "DWC OTG HCD QH Initialized\n");
+	dev_dbg(hcd->dev, "DWC OTG HCD QH - qh = %p\n", qh);
+	dev_dbg(hcd->dev, "DWC OTG HCD QH - Device Address = %d\n",
+		dwc2_hcd_get_dev_addr(&urb->pipe_info));
+	dev_dbg(hcd->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
+		dwc2_hcd_get_ep_num(&urb->pipe_info),
+		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
+
+	switch (dev_speed) {
+	case USB_SPEED_LOW:
+		qh->dev_speed = DWC2_EP_SPEED_LOW;
+		speed = "low";
+		break;
+	case USB_SPEED_FULL:
+		qh->dev_speed = DWC2_EP_SPEED_FULL;
+		speed = "full";
+		break;
+	case USB_SPEED_HIGH:
+		qh->dev_speed = DWC2_EP_SPEED_HIGH;
+		speed = "high";
+		break;
+	default:
+		speed = "?";
+		break;
+	}
+	dev_dbg(hcd->dev, "DWC OTG HCD QH - Speed = %s\n", speed);
+
+	switch (qh->ep_type) {
+	case USB_ENDPOINT_XFER_ISOC:
+		type = "isochronous";
+		break;
+	case USB_ENDPOINT_XFER_INT:
+		type = "interrupt";
+		break;
+	case USB_ENDPOINT_XFER_CONTROL:
+		type = "control";
+		break;
+	case USB_ENDPOINT_XFER_BULK:
+		type = "bulk";
+		break;
+	default:
+		type = "?";
+		break;
+	}
+
+	dev_dbg(hcd->dev, "DWC OTG HCD QH - Type = %s\n", type);
+
+#ifdef DEBUG
+	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
+		dev_dbg(hcd->dev, "DWC OTG HCD QH - usecs = %d\n", qh->usecs);
+		dev_dbg(hcd->dev, "DWC OTG HCD QH - interval = %d\n",
+			qh->interval);
+	}
+#endif
+}
+
+/**
+ * hcd_qh_create() - Allocates and initializes a QH
+ *
+ * @hcd:          The HCD state structure for the DWC OTG controller
+ * @urb:          Holds the information about the device/endpoint needed
+ *                to initialize the QH
+ * @mem_flags:    Flags for allocating memory
+ *
+ * Return: Pointer to the newly allocated QH, or NULL on error
+ */
+static struct dwc2_qh *hcd_qh_create(struct dwc2_hcd *hcd,
+				     struct dwc2_hcd_urb *urb, gfp_t mem_flags)
+{
+	struct dwc2_qh *qh;
+
+	/* Allocate memory */
+	qh = kzalloc(sizeof(*qh), mem_flags);
+	if (!qh) {
+		dev_err(hcd->dev, "qh allocation failed\n");
+		return NULL;
+	}
+
+	qh_init(hcd, qh, urb);
+
+	if (hcd->dma_desc_enable &&
+	    dwc2_hcd_qh_init_ddma(hcd, qh, mem_flags) < 0) {
+		dwc2_hcd_qh_free(hcd, qh);
+		return NULL;
+	}
+
+	return qh;
+}
+
+/**
+ * periodic_channel_available() - Checks that a channel is available for a
+ * periodic transfer
+ *
+ * @hcd: The HCD state structure for the DWC OTG controller
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int periodic_channel_available(struct dwc2_hcd *hcd)
+{
+	/*
+	 * Currently assuming that there is a dedicated host channel for
+	 * each periodic transaction plus at least one host channel for
+	 * non-periodic transactions
+	 */
+	int status;
+	int num_channels;
+
+	num_channels = hcd->core_params->host_channels;
+	if (hcd->periodic_channels + hcd->non_periodic_channels <
+	    num_channels && hcd->periodic_channels < num_channels - 1) {
+		status = 0;
+	} else {
+		dev_info(hcd->dev,
+			 "%s: Total channels: %d, Periodic: %d, "
+			 "Non-periodic: %d\n", __func__, num_channels,
+			 hcd->periodic_channels, hcd->non_periodic_channels);
+		status = -ENOSPC;
+	}
+
+	return status;
+}
+
+/**
+ * check_periodic_bandwidth() - Checks that there is sufficient bandwidth for
+ * the specified QH in the periodic schedule
+ *
+ * @hcd: The HCD state structure for the DWC OTG controller
+ * @qh:  QH containing periodic bandwidth required
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * For simplicity, this calculation assumes that all the transfers in the
+ * periodic schedule may occur in the same (micro)frame
+ */
+static int check_periodic_bandwidth(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	int status;
+	s16 max_claimed_usecs;
+
+	status = 0;
+
+	if (qh->dev_speed == DWC2_EP_SPEED_HIGH || qh->do_split)
+		/*
+		 * High speed mode
+		 * Max periodic usecs is 80% x 125 usec = 100 usec
+		 */
+		max_claimed_usecs = 100 - qh->usecs;
+	else
+		/*
+		 * Full speed mode
+		 * Max periodic usecs is 90% x 1000 usec = 900 usec
+		 */
+		max_claimed_usecs = 900 - qh->usecs;
+
+	if (hcd->periodic_usecs > max_claimed_usecs) {
+		dev_info(hcd->dev,
+			 "%s: already claimed usecs %d, required usecs %d\n",
+			 __func__, hcd->periodic_usecs, qh->usecs);
+		status = -ENOSPC;
+	}
+
+	return status;
+}
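+
+/*
+ * For example, on a high-speed bus 100 us of each 125 us microframe may
+ * be claimed for periodic traffic. If 60 us is already claimed and a new
+ * QH needs 50 us, then 60 > (100 - 50) and the QH is refused with -ENOSPC
+ */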
+
+/**
+ * check_max_xfer_size() - Checks that the max transfer size allowed in a host
+ * channel is large enough to handle the maximum data transfer in a single
+ * (micro)frame for a periodic transfer
+ *
+ * @hcd: The HCD state structure for the DWC OTG controller
+ * @qh:  QH for a periodic endpoint
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int check_max_xfer_size(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	u32 max_xfer_size;
+	u32 max_channel_xfer_size;
+	int status = 0;
+
+	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
+	max_channel_xfer_size = hcd->core_params->max_transfer_size;
+
+	if (max_xfer_size > max_channel_xfer_size) {
+		dev_info(hcd->dev,
+			 "%s: Periodic xfer length %d > max xfer length for channel %d\n",
+			 __func__, max_xfer_size, max_channel_xfer_size);
+		status = -ENOSPC;
+	}
+
+	return status;
+}
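+
+/*
+ * Note that qh->maxp holds the endpoint's raw wMaxPacketSize value, so
+ * dwc2_max_packet() extracts the packet size from bits 10:0 and
+ * dwc2_hb_mult() the transactions per (micro)frame (bits 12:11 plus
+ * one); their product is the worst-case payload checked above
+ */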
+
+/**
+ * schedule_periodic() - Schedules an interrupt or isochronous transfer in the
+ * periodic schedule
+ *
+ * @hcd: The HCD state structure for the DWC OTG controller
+ * @qh:  QH for the periodic transfer. The QH should already contain the
+ *       scheduling information.
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+static int schedule_periodic(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	int status;
+
+	status = periodic_channel_available(hcd);
+	if (status) {
+		dev_info(hcd->dev,
+			 "%s: No host channel available for periodic transfer\n",
+			 __func__);
+		return status;
+	}
+
+	status = check_periodic_bandwidth(hcd, qh);
+	if (status) {
+		dev_info(hcd->dev,
+			 "%s: Insufficient periodic bandwidth for periodic transfer\n",
+			 __func__);
+		return status;
+	}
+
+	status = check_max_xfer_size(hcd, qh);
+	if (status) {
+		dev_info(hcd->dev,
+			 "%s: Channel max transfer size too small for periodic transfer\n",
+			 __func__);
+		return status;
+	}
+
+	if (hcd->dma_desc_enable)
+		/* Don't rely on SOF and start in ready schedule */
+		list_add_tail(&qh->qh_list_entry, &hcd->periodic_sched_ready);
+	else
+		/* Always start in inactive schedule */
+		list_add_tail(&qh->qh_list_entry,
+			      &hcd->periodic_sched_inactive);
+
+	/* Reserve periodic channel */
+	hcd->periodic_channels++;
+
+	/* Update claimed usecs per (micro)frame */
+	hcd->periodic_usecs += qh->usecs;
+
+	return status;
+}
+
+/**
+ * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
+ * schedule if it is not already in the schedule. If the QH is already in
+ * the schedule, no action is taken.
+ *
+ * @hcd: The HCD state structure for the DWC OTG controller
+ * @qh:  The QH to add
+ *
+ * Return: 0 if successful, negative error code otherwise
+ */
+int dwc2_hcd_qh_add(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	int status = 0;
+	u32 intr_mask;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	if (!list_empty(&qh->qh_list_entry))
+		/* QH already in a schedule */
+		return status;
+
+	/* Add the new QH to the appropriate schedule */
+	if (dwc2_qh_is_non_per(qh)) {
+		/* Always start in inactive schedule */
+		list_add_tail(&qh->qh_list_entry,
+			      &hcd->non_periodic_sched_inactive);
+	} else {
+		status = schedule_periodic(hcd, qh);
+		if (status == 0) {
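+			/*
+			 * The SOF interrupt drives the periodic schedules,
+			 * so unmask it when the first periodic QH is added.
+			 * dwc2_hcd_qh_remove() masks it again when the
+			 * last one is removed.
+			 */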
+			if (!hcd->periodic_qh_count) {
+				intr_mask = readl(hcd->regs + GINTMSK);
+				intr_mask |= GINTSTS_SOF;
+				writel(intr_mask, hcd->regs + GINTMSK);
+			}
+			hcd->periodic_qh_count++;
+		}
+	}
+
+	return status;
+}
+
+/**
+ * deschedule_periodic() - Removes an interrupt or isochronous transfer from the
+ * periodic schedule
+ *
+ * @hcd: The HCD state structure for the DWC OTG controller
+ * @qh:  QH for the periodic transfer
+ */
+static void deschedule_periodic(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	list_del_init(&qh->qh_list_entry);
+
+	/* Release periodic channel reservation */
+	hcd->periodic_channels--;
+
+	/* Update claimed usecs per (micro)frame */
+	hcd->periodic_usecs -= qh->usecs;
+}
+
+/**
+ * dwc2_hcd_qh_remove() - Removes a QH from either the non-periodic or periodic
+ * schedule. Memory is not freed.
+ *
+ * @hcd: The HCD state structure
+ * @qh:  QH to remove from schedule
+ */
+void dwc2_hcd_qh_remove(struct dwc2_hcd *hcd, struct dwc2_qh *qh)
+{
+	u32 intr_mask;
+
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	if (list_empty(&qh->qh_list_entry))
+		/* QH is not in a schedule */
+		return;
+
+	if (dwc2_qh_is_non_per(qh)) {
+		if (hcd->non_periodic_qh_ptr == &qh->qh_list_entry)
+			hcd->non_periodic_qh_ptr =
+					hcd->non_periodic_qh_ptr->next;
+		list_del_init(&qh->qh_list_entry);
+	} else {
+		deschedule_periodic(hcd, qh);
+		hcd->periodic_qh_count--;
+		if (!hcd->periodic_qh_count) {
+			intr_mask = readl(hcd->regs + GINTMSK);
+			intr_mask &= ~GINTSTS_SOF;
+			writel(intr_mask, hcd->regs + GINTMSK);
+		}
+	}
+}
+
+/*
+ * Schedule the next continuing periodic split transfer: either the complete
+ * split phase of the current transaction, or the start split of the next
+ * occurrence of the transfer
+ */
+static void sched_periodic_split(struct dwc2_hcd *hcd, struct dwc2_qh *qh,
+				 u16 frame_number,
+				 int sched_next_periodic_split)
+{
+	u16 incr;
+
+	if (sched_next_periodic_split) {
+		qh->sched_frame = frame_number;
+		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
+		if (dwc2_frame_num_le(frame_number, incr)) {
+			/*
+			 * Allow one frame to elapse after start split
+			 * microframe before scheduling complete split, but
+			 * DON'T if we are doing the next start split in the
+			 * same frame for an ISOC out
+			 */
+			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
+			    qh->ep_is_in != 0) {
+				qh->sched_frame =
+					dwc2_frame_num_inc(qh->sched_frame, 1);
+			}
+		}
+	} else {
+		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
+						     qh->interval);
+		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
+			qh->sched_frame = frame_number;
+		qh->sched_frame |= 0x7;
+		qh->start_split_frame = qh->sched_frame;
+	}
+}
+
+/*
+ * Deactivates a QH. For non-periodic QHs, removes the QH from the active
+ * non-periodic schedule. The QH is added to the inactive non-periodic
+ * schedule if any QTDs are still attached to the QH.
+ *
+ * For periodic QHs, the QH is removed from the periodic queued schedule. If
+ * there are any QTDs still attached to the QH, the QH is added to either the
+ * periodic inactive schedule or the periodic ready schedule and its next
+ * scheduled frame is calculated. The QH is placed in the ready schedule if
+ * the scheduled frame has been reached already. Otherwise it's placed in the
+ * inactive schedule. If there are no QTDs attached to the QH, the QH is
+ * completely removed from the periodic schedule.
+ */
+void dwc2_hcd_qh_deactivate(struct dwc2_hcd *hcd, struct dwc2_qh *qh,
+			     int sched_next_periodic_split)
+{
+	dev_dbg(hcd->dev, "%s()\n", __func__);
+
+	if (dwc2_qh_is_non_per(qh)) {
+		dwc2_hcd_qh_remove(hcd, qh);
+		if (!list_empty(&qh->qtd_list))
+			/* Add back to inactive non-periodic schedule */
+			dwc2_hcd_qh_add(hcd, qh);
+	} else {
+		u16 frame_number = dwc2_hcd_get_frame_number(hcd);
+
+		if (qh->do_split) {
+			sched_periodic_split(hcd, qh, frame_number,
+					     sched_next_periodic_split);
+		} else {
+			qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
+							     qh->interval);
+			if (dwc2_frame_num_le(qh->sched_frame, frame_number))
+				qh->sched_frame = frame_number;
+		}
+
+		if (list_empty(&qh->qtd_list)) {
+			dwc2_hcd_qh_remove(hcd, qh);
+		} else {
+			/*
+			 * Remove from periodic_sched_queued and move to the
+			 * appropriate queue
+			 */
+			if (qh->sched_frame == frame_number)
+				list_move(&qh->qh_list_entry,
+					  &hcd->periodic_sched_ready);
+			else
+				list_move(&qh->qh_list_entry,
+					  &hcd->periodic_sched_inactive);
+		}
+	}
+}
+
+/**
+ * dwc2_hcd_qtd_init() - Initializes a QTD structure
+ *
+ * @qtd: The QTD to initialize
+ * @urb: The associated URB
+ */
+void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
+{
+	qtd->urb = urb;
+	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
+			USB_ENDPOINT_XFER_CONTROL) {
+		/*
+		 * The only time the QTD data toggle is used is on the data
+		 * phase of control transfers. This phase always starts with
+		 * DATA1.
+		 */
+		qtd->data_toggle = DWC2_HC_PID_DATA1;
+		qtd->control_phase = DWC2_CONTROL_SETUP;
+	}
+
+	/* Start split */
+	qtd->complete_split = 0;
+	qtd->isoc_split_pos = DWC_HCSPLT_XACTPOS_ALL;
+	qtd->isoc_split_offset = 0;
+	qtd->in_process = 0;
+
+	/* Store the qtd ptr in the urb to reference the QTD */
+	urb->qtd = qtd;
+}
+
+/**
+ * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
+ *
+ * @qtd:          The QTD to add
+ * @hcd:          The DWC HCD structure
+ * @qh:           Out parameter to return queue head
+ * @mem_flags:    Flags for allocating memory
+ *
+ * Return: 0 if successful, negative error code otherwise
+ *
+ * Finds the correct QH to place the QTD into. If it does not find a QH, it
+ * will create a new QH. If the QH to which the QTD is added is not currently
+ * scheduled, it is placed into the proper schedule based on its EP type.
+ */
+int dwc2_hcd_qtd_add(struct dwc2_qtd *qtd, struct dwc2_hcd *hcd,
+		     struct dwc2_qh **qh, gfp_t mem_flags)
+{
+	struct dwc2_hcd_urb *urb = qtd->urb;
+	struct dwc2_qh *qh_tmp;
+	unsigned long flags;
+	int allocated = 0;
+	int retval = 0;
+
+	/*
+	 * Get the QH which holds the QTD-list to insert to. Create QH if it
+	 * doesn't exist.
+	 */
+	if (*qh == NULL) {
+		*qh = hcd_qh_create(hcd, urb, mem_flags);
+		if (*qh == NULL) {
+			retval = -ENOMEM;
+			goto done;
+		}
+		allocated = 1;
+	}
+	spin_lock_irqsave(&hcd->lock, flags);
+	retval = dwc2_hcd_qh_add(hcd, *qh);
+	if (retval && allocated) {
+		qh_tmp = *qh;
+		*qh = NULL;
+		spin_unlock_irqrestore(&hcd->lock, flags);
+		dwc2_hcd_qh_free(hcd, qh_tmp);
+	} else {
+		list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
+		spin_unlock_irqrestore(&hcd->lock, flags);
+	}
+
+done:
+	return retval;
+}
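+
+/*
+ * Typical usage, as a sketch of the URB enqueue path (the surrounding
+ * allocation code is assumed, not quoted from hcd.c):
+ *
+ *	qtd = kzalloc(sizeof(*qtd), mem_flags);
+ *	if (!qtd)
+ *		return -ENOMEM;
+ *	dwc2_hcd_qtd_init(qtd, urb);
+ *	retval = dwc2_hcd_qtd_add(qtd, hcd, &qh, mem_flags);
+ *
+ * where qh is the caller's per-endpoint QH pointer, NULL the first time
+ * the endpoint is used so that dwc2_hcd_qtd_add() creates the QH
+ */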
-- 
1.7.1
