Signed-off-by: Paul Zimmerman <paulz@xxxxxxxxxxxx> --- drivers/usb/dwc2/core.c | 2533 ++++++++++++++++++++++++++++++++++++++++++ drivers/usb/dwc2/core.h | 745 +++++++++++++ drivers/usb/dwc2/core_intr.c | 726 ++++++++++++ drivers/usb/dwc2/hw.h | 512 +++++++++ 4 files changed, 4516 insertions(+), 0 deletions(-) create mode 100644 drivers/usb/dwc2/core.c create mode 100644 drivers/usb/dwc2/core.h create mode 100644 drivers/usb/dwc2/core_intr.c create mode 100644 drivers/usb/dwc2/hw.h diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c new file mode 100644 index 0000000..2a26456 --- /dev/null +++ b/drivers/usb/dwc2/core.c @@ -0,0 +1,2533 @@ +/* + * core.c - DesignWare HS OTG Controller common routines + * + * Copyright (C) 2004-2012 Synopsys, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * The Core code provides basic services for accessing and managing the + * DWC_otg hardware. These services are used by both the Host Controller + * Driver and the Peripheral Controller Driver. 
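 *
 * A rough usage sketch (illustrative only; it assumes the interfaces
 * declared in core.h in this patch) of how a probe/start path might
 * consume these services:
 *
 *	dwc2_core_init(hcd);
 *	if (dwc2_is_host_mode(hcd))
 *		dwc2_core_host_init(hcd);
 *	dwc2_enable_global_interrupts(hcd);
 *
 * Register access throughout is plain readl()/writel() at hcd->regs plus
 * the offsets defined in hw.h.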
+ */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/usb.h> + +#include <linux/usb/hcd.h> +#include <linux/usb/ch11.h> +#include <linux/usb/gadget.h> +#include <linux/usb/ch9.h> + +#include "core.h" +#include "hcd.h" + +/** + * dwc2_enable_global_interrupts() - Enables the controller's Global + * Interrupt in the AHB Config register + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_enable_global_interrupts(struct dwc2_hcd *hcd) +{ + u32 ahbcfg = readl(hcd->regs + GAHBCFG); + + ahbcfg |= GAHBCFG_GlblIntrEn; + writel(ahbcfg, hcd->regs + GAHBCFG); +} + +/** + * dwc2_disable_global_interrupts() - Disables the controller's Global + * Interrupt in the AHB Config register + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_disable_global_interrupts(struct dwc2_hcd *hcd) +{ + u32 ahbcfg = readl(hcd->regs + GAHBCFG); + + ahbcfg &= ~GAHBCFG_GlblIntrEn; + writel(ahbcfg, hcd->regs + GAHBCFG); +} + +/** + * enable_common_interrupts() - Initializes the commmon interrupts, + * used in both device and host modes + * + * @hcd: Programming view of the DWC_otg controller + */ +static void enable_common_interrupts(struct dwc2_hcd *hcd) +{ + u32 intmsk; + + /* Clear any pending OTG Interrupts */ + writel(0xffffffff, hcd->regs + GOTGINT); + + /* Clear any pending interrupts */ + writel(0xffffffff, hcd->regs + GINTSTS); + + /* Enable the interrupts in the GINTMSK */ + intmsk = GINTSTS_ModeMis; + intmsk |= GINTSTS_OTGInt; + + if (!hcd->dma_enable) + intmsk |= GINTSTS_RxFLvl; + + intmsk |= GINTSTS_ConIDStsChng; + intmsk |= GINTSTS_WkUpInt; + intmsk |= GINTSTS_USBSusp; + intmsk |= GINTSTS_SessReqInt; + + if (hcd->core_params->lpm_enable) + intmsk |= GINTSTS_LPMTranRcvd; + + writel(intmsk, hcd->regs + GINTMSK); +} + +/* + * Initializes the FSLSPClkSel field of the HCFG register depending on the + * PHY type + */ +static void init_fslspclksel(struct dwc2_hcd *hcd) +{ + u32 hcfg; + u32 val; + + if (((hcd->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) == + GHWCFG2_HS_PHY_TYPE_ULPI && + (hcd->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) == + GHWCFG2_FS_PHY_TYPE_DEDICATED && + hcd->core_params->ulpi_fs_ls > 0) || + hcd->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) { + /* Full speed PHY */ + val = HCFG_FSLSPCLKSEL_48_MHZ; + } else { + /* High speed PHY running at full speed or high speed */ + val = HCFG_FSLSPCLKSEL_30_60_MHZ; + } + + dev_dbg(hcd->dev, "Initializing HCFG.FSLSPClkSel to %08x\n", val); + hcfg = readl(hcd->regs + HCFG); + hcfg &= ~HCFG_FSLSPCLKSEL_MASK; + hcfg |= val; + writel(hcfg, hcd->regs + HCFG); +} + +/* + * Do core a soft reset of the core. Be careful with this because it + * resets all the internal state machines of the core. + */ +static void core_reset(struct dwc2_hcd *hcd) +{ + u32 greset; + int count = 0; + + dev_dbg(hcd->dev, "%s\n", __func__); + + /* Wait for AHB master IDLE state */ + do { + msleep(20); + greset = readl(hcd->regs + GRSTCTL); + if (++count > 500) { + dev_warn(hcd->dev, "%s() HANG! 
AHB Idle GRSTCTL=%0x\n", + __func__, greset); + return; + } + } while (!(greset & GRSTCTL_AHBIdle)); + + /* Core Soft Reset */ + count = 0; + greset |= GRSTCTL_CSftRst; + writel(greset, hcd->regs + GRSTCTL); + do { + msleep(20); + greset = readl(hcd->regs + GRSTCTL); + if (++count > 500) { + dev_warn(hcd->dev, + "%s() HANG! Soft Reset GRSTCTL=%0x\n", + __func__, greset); + break; + } + } while (greset & GRSTCTL_CSftRst); + + /* + * NOTE: This long sleep is _very_ important, otherwise the core will + * not stay in host mode after a connector ID change! + */ + msleep(100); +} + +/** + * dwc2_core_init() - Initializes the DWC_otg controller registers and + * prepares the core for device mode or host mode operation + * + * @hcd: Programming view of the DWC_otg controller + */ +void dwc2_core_init(struct dwc2_hcd *hcd) +{ + int i; + u8 brst_sz; + u32 usbcfg; + u32 i2cctl; + u32 gotgctl; + u32 ahbcfg = 0; + + dev_dbg(hcd->dev, "dwc2_core_init(%p)\n", hcd); + + usbcfg = readl(hcd->regs + GUSBCFG); + + /* Program the ULPI External VBUS bit if needed */ + usbcfg &= ~GUSBCFG_ULPIExtVBusDrv; + if (hcd->core_params->phy_ulpi_ext_vbus == DWC_PHY_ULPI_EXTERNAL_VBUS) + usbcfg |= GUSBCFG_ULPIExtVBusDrv; + + /* Set external TS Dline pulsing */ + usbcfg &= ~GUSBCFG_TermSelDlPulse; + if (hcd->core_params->ts_dline > 0) + usbcfg |= GUSBCFG_TermSelDlPulse; + writel(usbcfg, hcd->regs + GUSBCFG); + + /* Reset the Controller */ + core_reset(hcd); + + dev_dbg(hcd->dev, "num_dev_perio_in_ep=%d\n", + hcd->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT & + GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >> + GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT); + + hcd->total_fifo_size = hcd->hwcfg3 >> GHWCFG3_DFIFO_DEPTH_SHIFT & + GHWCFG3_DFIFO_DEPTH_MASK >> GHWCFG3_DFIFO_DEPTH_SHIFT; + hcd->rx_fifo_size = readl(hcd->regs + GRXFSIZ); + hcd->nperio_tx_fifo_size = readl(hcd->regs + GNPTXFSIZ) >> 16 & 0xffff; + + dev_dbg(hcd->dev, "Total FIFO SZ=%d\n", hcd->total_fifo_size); + dev_dbg(hcd->dev, "RxFIFO SZ=%d\n", hcd->rx_fifo_size); + dev_dbg(hcd->dev, "NP TxFIFO SZ=%d\n", hcd->nperio_tx_fifo_size); + + /* + * This programming sequence needs to happen in FS mode before any other + * programming occurs + */ + if (hcd->core_params->speed == DWC_SPEED_PARAM_FULL && + hcd->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) { + /* If FS mode with FS PHY */ + + /* + * core_init() is now called on every switch so only call the + * following for the first time through + */ + if (!hcd->phy_init_done) { + hcd->phy_init_done = 1; + dev_dbg(hcd->dev, "FS_PHY detected\n"); + usbcfg = readl(hcd->regs + GUSBCFG); + usbcfg |= GUSBCFG_PHYSel; + writel(usbcfg, hcd->regs + GUSBCFG); + + /* Reset after a PHY select */ + core_reset(hcd); + } + + /* + * Program DCFG.DevSpd or HCFG.FSLSPclkSel to 48Mhz in FS. Also + * do this on HNP Dev/Host mode switches (done in dev_init and + * host_init). 
+ */ + if (dwc2_is_host_mode(hcd)) + init_fslspclksel(hcd); + + if (hcd->core_params->i2c_enable > 0) { + dev_dbg(hcd->dev, "FS_PHY Enabling I2c\n"); + + /* Program GUSBCFG.OtgUtmiFsSel to I2C */ + usbcfg = readl(hcd->regs + GUSBCFG); + usbcfg |= GUSBCFG_OTGUTMIFSSel; + writel(usbcfg, hcd->regs + GUSBCFG); + + /* Program GI2CCTL.I2CEn */ + i2cctl = readl(hcd->regs + GI2CCTL); + i2cctl &= GI2CCTL_I2CDEVADDR_MASK; + i2cctl |= 1 << GI2CCTL_I2CDEVADDR_SHIFT; + i2cctl &= ~GI2CCTL_I2CEN; + writel(i2cctl, hcd->regs + GI2CCTL); + i2cctl |= GI2CCTL_I2CEN; + writel(i2cctl, hcd->regs + GI2CCTL); + } + } else { + /* High speed PHY */ + if (!hcd->phy_init_done) { + hcd->phy_init_done = 1; + + /* + * HS PHY parameters. These parameters are preserved + * during soft reset so only program the first time. Do + * a soft reset immediately after setting phyif. + */ + if (hcd->core_params->phy_type == 2) { + /* ULPI interface */ + usbcfg |= GUSBCFG_ULPIUTMISel; + usbcfg &= ~(GUSBCFG_PHYIf16 | GUSBCFG_DDRSel); + if (hcd->core_params->phy_ulpi_ddr > 0) + usbcfg |= GUSBCFG_DDRSel; + } else if (hcd->core_params->phy_type == 1) { + /* UTMI+ interface */ + usbcfg &= ~(GUSBCFG_ULPIUTMISel | + GUSBCFG_PHYIf16); + if (hcd->core_params->phy_utmi_width == 16) + usbcfg |= GUSBCFG_PHYIf16; + } else { + dev_err(hcd->dev, "FS PHY TYPE\n"); + } + writel(usbcfg, hcd->regs + GUSBCFG); + + /* Reset after setting the PHY parameters */ + core_reset(hcd); + } + } + + if ((hcd->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK) == + GHWCFG2_HS_PHY_TYPE_ULPI && + (hcd->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) == + GHWCFG2_FS_PHY_TYPE_DEDICATED && hcd->core_params->ulpi_fs_ls > 0) { + dev_dbg(hcd->dev, "Setting ULPI FSLS\n"); + usbcfg = readl(hcd->regs + GUSBCFG); + usbcfg |= GUSBCFG_ULPIFSLS; + usbcfg |= GUSBCFG_ULPIClkSuspM; + writel(usbcfg, hcd->regs + GUSBCFG); + } else { + usbcfg = readl(hcd->regs + GUSBCFG); + usbcfg &= ~GUSBCFG_ULPIFSLS; + usbcfg &= ~GUSBCFG_ULPIClkSuspM; + writel(usbcfg, hcd->regs + GUSBCFG); + } + + /* Program the GAHBCFG Register */ + switch (hcd->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) { + case GHWCFG2_EXT_DMA_ARCH: + dev_dbg(hcd->dev, "External DMA Mode\n"); + brst_sz = hcd->core_params->dma_burst_size; + ahbcfg &= ~GAHBCFG_HBstLen_MASK; + while (brst_sz > 1) { + ahbcfg = (ahbcfg + (1 << GAHBCFG_HBstLen_SHIFT)) & + GAHBCFG_HBstLen_MASK; + brst_sz >>= 1; + } + hcd->dma_enable = hcd->core_params->dma_enable > 0; + hcd->dma_desc_enable = hcd->core_params->dma_desc_enable > 0; + break; + + case GHWCFG2_INT_DMA_ARCH: + dev_dbg(hcd->dev, "Internal DMA Mode\n"); + /* + * Old value was GAHBCFG_HBstLen_Incr - done for + * Host mode ISOC in issue fix - vahrama + */ + ahbcfg &= ~GAHBCFG_HBstLen_MASK; + ahbcfg |= GAHBCFG_HBstLen_Incr4; + hcd->dma_enable = hcd->core_params->dma_enable > 0; + hcd->dma_desc_enable = hcd->core_params->dma_desc_enable > 0; + break; + + case GHWCFG2_SLAVE_ONLY_ARCH: + default: + dev_dbg(hcd->dev, "Slave Only Mode\n"); + hcd->dma_enable = 0; + hcd->dma_desc_enable = 0; + break; + } + + dev_dbg(hcd->dev, "dma_enable:%d (%d) dma_desc_enable:%d (%d)\n", + hcd->dma_enable, hcd->core_params->dma_enable, + hcd->dma_desc_enable, hcd->core_params->dma_desc_enable); + + for (i = 0; i < hcd->core_params->host_channels; i++) { + writel(0, hcd->regs + HCDMA(i)); + writel(0, hcd->regs + HCDMAB(i)); + } + + if (hcd->dma_enable) { + if (hcd->dma_desc_enable) + dev_info(hcd->dev, "Using Descriptor DMA mode\n"); + else + dev_info(hcd->dev, "Using Buffer DMA mode\n"); + } else { + dev_info(hcd->dev, "Using Slave mode\n"); + 
hcd->dma_desc_enable = 0; + } + + if (hcd->core_params->ahb_single > 0) + ahbcfg |= GAHBCFG_AHBSingle; + + ahbcfg |= hcd->dma_enable << GAHBCFG_DMAEn_SHIFT & GAHBCFG_DMAEn; + writel(ahbcfg, hcd->regs + GAHBCFG); + + hcd->en_multiple_tx_fifo = !!(hcd->hwcfg4 & GHWCFG4_DED_FIFO_EN); + + /* Program the GUSBCFG register */ + usbcfg = readl(hcd->regs + GUSBCFG); + + switch (hcd->hwcfg2 & GHWCFG2_OP_MODE_MASK) { + case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE: + usbcfg &= ~GUSBCFG_HNPCap; + usbcfg |= (hcd->core_params->otg_cap == + DWC2_CAP_PARAM_HNP_SRP_CAPABLE) << + GUSBCFG_HNPCap_SHIFT; + usbcfg &= ~GUSBCFG_SRPCap; + usbcfg |= (hcd->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) << + GUSBCFG_SRPCap_SHIFT; + break; + + case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE: + usbcfg &= ~GUSBCFG_HNPCap; + usbcfg &= ~GUSBCFG_SRPCap; + usbcfg |= (hcd->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) << + GUSBCFG_SRPCap_SHIFT; + break; + + case GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE: + usbcfg &= ~GUSBCFG_HNPCap; + usbcfg &= ~GUSBCFG_SRPCap; + break; + + case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE: + usbcfg &= ~GUSBCFG_HNPCap; + usbcfg &= ~GUSBCFG_SRPCap; + usbcfg |= (hcd->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) << + GUSBCFG_SRPCap_SHIFT; + break; + + case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE: + usbcfg &= ~GUSBCFG_HNPCap; + usbcfg &= ~GUSBCFG_SRPCap; + break; + + case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST: + usbcfg &= ~GUSBCFG_HNPCap; + usbcfg &= ~GUSBCFG_SRPCap; + usbcfg |= (hcd->core_params->otg_cap != + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE) << + GUSBCFG_SRPCap_SHIFT; + break; + + case GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST: + usbcfg &= ~GUSBCFG_HNPCap; + usbcfg &= ~GUSBCFG_SRPCap; + break; + } + + writel(usbcfg, hcd->regs + GUSBCFG); + + if (hcd->core_params->lpm_enable > 0) { + u32 lpmcfg = readl(hcd->regs + GLPMCFG); + + /* To enable LPM support set lpm_cap_en bit */ + lpmcfg |= GLPMCFG_LPM_CAP_EN; + + /* Make AppL1Res ACK */ + lpmcfg |= GLPMCFG_APPL_RESP; + + /* Retry 3 times */ + lpmcfg &= ~GLPMCFG_RETRY_COUNT_MASK; + lpmcfg |= 3 << GLPMCFG_RETRY_COUNT_SHIFT; + + writel(lpmcfg, hcd->regs + GLPMCFG); + } + + if (hcd->core_params->ic_usb_cap > 0) { + usbcfg = readl(hcd->regs + GUSBCFG); + usbcfg |= GUSBCFG_ICUSBCap; + writel(usbcfg, hcd->regs + GUSBCFG); + } + + gotgctl = readl(hcd->regs + GOTGCTL); + gotgctl &= ~GOTGCTL_OTGVER; + gotgctl |= ((hcd->core_params->otg_ver > 0) << GOTGCTL_OTGVER_SHIFT) & + GOTGCTL_OTGVER; + writel(gotgctl, hcd->regs + GOTGCTL); + + /* Set OTG version supported */ + hcd->otg_ver = hcd->core_params->otg_ver > 0; + dev_info(hcd->dev, "OTG VER PARAM: %d, OTG VER FLAG: %d\n", + hcd->core_params->otg_ver, hcd->otg_ver); + + /* Clear the SRP success bit for FS-I2c */ + hcd->srp_success = 0; + + /* Enable common interrupts */ + enable_common_interrupts(hcd); + + /* + * Do device or host intialization based on mode during PCD and + * HCD initialization + */ + if (dwc2_is_host_mode(hcd)) { + dev_dbg(hcd->dev, "Host Mode\n"); + hcd->op_state = A_HOST; + } else { + dev_dbg(hcd->dev, "Device Mode\n"); + hcd->op_state = B_PERIPHERAL; + } +} + +/** + * dwc2_enable_host_interrupts() - Enables the Host mode interrupts + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_enable_host_interrupts(struct dwc2_hcd *hcd) +{ + u32 intmsk; + + dev_dbg(hcd->dev, "%s()\n", __func__); + + /* Disable all interrupts */ + writel(0, hcd->regs + GINTMSK); + + /* Clear any pending interrupts */ + writel(0xffffffff, hcd->regs + GINTSTS); + + /* Enable the common interrupts */ + 
enable_common_interrupts(hcd); + + /* Enable host mode interrupts without disturbing common interrupts */ + intmsk = readl(hcd->regs + GINTMSK); + intmsk |= GINTSTS_DisconnInt; + intmsk |= GINTSTS_PrtInt; + intmsk |= GINTSTS_HChInt; + writel(intmsk, hcd->regs + GINTMSK); +} + +/** + * dwc2_disable_host_interrupts() - Disables the Host Mode interrupts + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_disable_host_interrupts(struct dwc2_hcd *hcd) +{ + u32 intmsk; + + dev_dbg(hcd->dev, "%s()\n", __func__); + + /* Disable host mode interrupts without disturbing common interrupts */ + intmsk = readl(hcd->regs + GINTMSK); + intmsk &= ~GINTSTS_SOF; + intmsk &= ~GINTSTS_PrtInt; + intmsk &= ~GINTSTS_HChInt; + intmsk &= ~GINTSTS_PTxFEmp; + intmsk &= ~GINTSTS_NPTxFEmp; + writel(intmsk, hcd->regs + GINTMSK); +} + +/** + * dwc2_core_host_init() - Initializes the DWC_otg controller registers for + * Host mode + * + * @hcd: Programming view of DWC_otg controller + * + * This function flushes the Tx and Rx FIFOs and flushes any entries in the + * request queues. Host channels are reset to ensure that they are ready for + * performing transfers. + */ +void dwc2_core_host_init(struct dwc2_hcd *hcd) +{ + struct dwc2_core_params *params = hcd->core_params; + u32 gdfifocfg; + u32 gotgctl; + u32 hprt0; + u32 nptxfifosize; + u32 ptxfifosize; + u16 rxfsiz, nptxfsiz, hptxfsiz; + int i; + u32 hcchar; + u32 hcfg; + u32 hfir; + int num_channels; + + dev_dbg(hcd->dev, "%s(%p)\n", __func__, hcd); + + /* Restart the Phy Clock */ + writel(0, hcd->regs + PCGCTL); + + /* Initialize Host Configuration Register */ + init_fslspclksel(hcd); + if (hcd->core_params->speed == DWC_SPEED_PARAM_FULL) { + hcfg = readl(hcd->regs + HCFG); + hcfg |= HCFG_FSLSSUPP; + writel(hcfg, hcd->regs + HCFG); + } + + /* + * This bit allows dynamic reloading of the HFIR register during + * runtime. This bit needs to be programmed during inital configuration + * and its value must not be changed during runtime. 
*/ + if (hcd->core_params->reload_ctl > 0) { + hfir = readl(hcd->regs + HFIR); + hfir |= HFIR_RLDCTRL; + writel(hfir, hcd->regs + HFIR); + } + + if (hcd->core_params->dma_desc_enable > 0) { + u32 op_mode = hcd->hwcfg2 & GHWCFG2_OP_MODE_MASK; + + if (!((hcd->hwcfg4 & GHWCFG4_DESC_DMA) && + hcd->snpsid >= DWC2_CORE_REV_2_90a && + (op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE || + op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE || + op_mode == GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE || + op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST || + op_mode == GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST))) { + + dev_err(hcd->dev, + "Host can't operate in Descriptor DMA mode.\n" + "Either core version is below 2.90a or " + "GHWCFG2, GHWCFG4 register values do not " + "allow Descriptor DMA in host mode.\nTo " + "run the driver in Buffer DMA host mode set " + "dma_desc_enable module parameter to 0.\n"); + return; + } + hcfg = readl(hcd->regs + HCFG); + hcfg |= HCFG_DESCDMA; + writel(hcfg, hcd->regs + HCFG); + } + + /* Configure data FIFO sizes */ + if ((hcd->hwcfg2 & GHWCFG2_DYNAMIC_FIFO) && + params->enable_dynamic_fifo) { + dev_dbg(hcd->dev, "Total FIFO Size=%d\n", + hcd->total_fifo_size); + dev_dbg(hcd->dev, "Rx FIFO Size=%d\n", + params->host_rx_fifo_size); + dev_dbg(hcd->dev, "NP Tx FIFO Size=%d\n", + params->host_nperio_tx_fifo_size); + dev_dbg(hcd->dev, "P Tx FIFO Size=%d\n", + params->host_perio_tx_fifo_size); + + /* Rx FIFO */ + dev_dbg(hcd->dev, "initial grxfsiz=%08x\n", + readl(hcd->regs + GRXFSIZ)); + writel(params->host_rx_fifo_size, hcd->regs + GRXFSIZ); + dev_dbg(hcd->dev, "new grxfsiz=%08x\n", + readl(hcd->regs + GRXFSIZ)); + + /* Non-periodic Tx FIFO */ + dev_dbg(hcd->dev, "initial gnptxfsiz=%08x\n", + readl(hcd->regs + GNPTXFSIZ)); + nptxfifosize = params->host_nperio_tx_fifo_size << + FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; + nptxfifosize |= params->host_rx_fifo_size << + FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; + writel(nptxfifosize, hcd->regs + GNPTXFSIZ); + dev_dbg(hcd->dev, "new gnptxfsiz=%08x\n", + readl(hcd->regs + GNPTXFSIZ)); + + /* Periodic Tx FIFO */ + dev_dbg(hcd->dev, "initial hptxfsiz=%08x\n", + readl(hcd->regs + HPTXFSIZ)); + ptxfifosize = params->host_perio_tx_fifo_size << + FIFOSIZE_DEPTH_SHIFT & FIFOSIZE_DEPTH_MASK; + ptxfifosize |= (params->host_rx_fifo_size + + params->host_nperio_tx_fifo_size) << + FIFOSIZE_STARTADDR_SHIFT & FIFOSIZE_STARTADDR_MASK; + writel(ptxfifosize, hcd->regs + HPTXFSIZ); + dev_dbg(hcd->dev, "new hptxfsiz=%08x\n", + readl(hcd->regs + HPTXFSIZ)); + + if (hcd->en_multiple_tx_fifo && + hcd->snpsid <= DWC2_CORE_REV_2_94a) { + /* + * Global DFIFOCFG calculation for Host mode - + * include RxFIFO, NPTXFIFO and HPTXFIFO + */ + gdfifocfg = readl(hcd->regs + GDFIFOCFG); + rxfsiz = readl(hcd->regs + GRXFSIZ) & 0x0000ffff; + nptxfsiz = readl(hcd->regs + GNPTXFSIZ) >> 16 & + 0x0000ffff; + hptxfsiz = readl(hcd->regs + HPTXFSIZ) >> 16 & + 0x0000ffff; + gdfifocfg &= ~GDFIFOCFG_EPINFOBASE_MASK; + gdfifocfg |= (rxfsiz + nptxfsiz + hptxfsiz) << + GDFIFOCFG_EPINFOBASE_SHIFT & + GDFIFOCFG_EPINFOBASE_MASK; + writel(gdfifocfg, hcd->regs + GDFIFOCFG); + } + } + + /* TODO - check this */ + /* Clear Host Set HNP Enable in the OTG Control Register */ + gotgctl = readl(hcd->regs + GOTGCTL); + gotgctl &= ~GOTGCTL_HSTSETHNPEN; + writel(gotgctl, hcd->regs + GOTGCTL); + + /* Make sure the FIFOs are flushed */ + dwc2_flush_tx_fifo(hcd, 0x10 /* all TX FIFOs */); + dwc2_flush_rx_fifo(hcd); + + /* Clear Host Set HNP Enable in the OTG Control Register */ + gotgctl = readl(hcd->regs + 
GOTGCTL); + gotgctl &= ~GOTGCTL_HSTSETHNPEN; + writel(gotgctl, hcd->regs + GOTGCTL); + + if (hcd->core_params->dma_desc_enable <= 0) { + /* Flush out any leftover queued requests */ + num_channels = hcd->core_params->host_channels; + for (i = 0; i < num_channels; i++) { + hcchar = readl(hcd->regs + HCCHAR(i)); + hcchar &= ~HCCHAR_CHENA; + hcchar |= HCCHAR_CHDIS; + hcchar &= ~HCCHAR_EPDIR; + writel(hcchar, hcd->regs + HCCHAR(i)); + } + + /* Halt all channels to put them into a known state */ + for (i = 0; i < num_channels; i++) { + int count = 0; + + hcchar = readl(hcd->regs + HCCHAR(i)); + hcchar |= HCCHAR_CHENA; + hcchar |= HCCHAR_CHDIS; + hcchar &= ~HCCHAR_EPDIR; + writel(hcchar, hcd->regs + HCCHAR(i)); + dev_dbg(hcd->dev, "%s: Halt channel %d\n", __func__, i); + do { + hcchar = readl(hcd->regs + HCCHAR(i)); + if (++count > 1000) { + dev_err(hcd->dev, + "%s: Unable to clear enable on channel %d\n", + __func__, i); + break; + } + udelay(1); + } while (hcchar & HCCHAR_CHENA); + } + } + + /* Turn on the vbus power */ + dev_info(hcd->dev, "Init: Port Power? op_state=%d\n", hcd->op_state); + if (hcd->op_state == A_HOST) { + hprt0 = dwc2_read_hprt0(hcd); + dev_info(hcd->dev, "Init: Power Port (%d)\n", + hprt0 >> HPRT0_PWR_SHIFT & + HPRT0_PWR >> HPRT0_PWR_SHIFT); + if (!(hprt0 & HPRT0_PWR)) { + hprt0 |= HPRT0_PWR; + writel(hprt0, hcd->regs + HPRT0); + } + } + + dwc2_enable_host_interrupts(hcd); +} + +/** + * dwc2_hc_init() - Prepares a host channel for transferring packets to/from a + * specific endpoint + * + * @hcd: Programming view of DWC_otg controller + * @hc: Information needed to initialize the host channel + * + * The HCCHARn register is set up with the characteristics specified in hc. Host + * channel interrupts that may need to be serviced while this transfer is in + * progress are enabled. + */ +void dwc2_hc_init(struct dwc2_hcd *hcd, struct dwc2_hc *hc) +{ + u8 hc_num = hc->hc_num; + u32 hc_intr_mask; + u32 intr_enable; + u32 gintmsk; + u32 hcchar; + u32 hcsplt = 0; + + dev_dbg(hcd->dev, "%s()\n", __func__); + + /* Clear old interrupt conditions for this host channel */ + hc_intr_mask = 0xffffffff; + hc_intr_mask &= ~HCINTMSK_RESERVED14_31; + writel(hc_intr_mask, hcd->regs + HCINT(hc_num)); + + /* Enable channel interrupts required for this transfer */ + hc_intr_mask = HCINTMSK_CHHLTD; + if (hcd->dma_enable) { + dev_dbg(hcd->dev, "DMA enabled\n"); + /* + * For Descriptor DMA mode core halts the channel on AHB error. + * Interrupt is not required. 
+ */ + if (!hcd->dma_desc_enable) { + dev_dbg(hcd->dev, "desc DMA disabled\n"); + hc_intr_mask |= HCINTMSK_AHBERR; + } else { + dev_dbg(hcd->dev, "desc DMA enabled\n"); + if (hc->ep_type == DWC2_EP_TYPE_ISOC) + hc_intr_mask |= HCINTMSK_XFERCOMPL; + } + + if (hc->error_state && !hc->do_split && + hc->ep_type != DWC2_EP_TYPE_ISOC) { + dev_dbg(hcd->dev, "setting ACK\n"); + hc_intr_mask |= HCINTMSK_ACK; + if (hc->ep_is_in) { + hc_intr_mask |= HCINTMSK_DATATGLERR; + if (hc->ep_type != DWC2_EP_TYPE_INTR) + hc_intr_mask |= HCINTMSK_NAK; + } + } + } else { + dev_dbg(hcd->dev, "DMA disabled\n"); + switch (hc->ep_type) { + case DWC2_EP_TYPE_CONTROL: + case DWC2_EP_TYPE_BULK: + dev_dbg(hcd->dev, "control/bulk\n"); + hc_intr_mask |= HCINTMSK_XFERCOMPL; + hc_intr_mask |= HCINTMSK_STALL; + hc_intr_mask |= HCINTMSK_XACTERR; + hc_intr_mask |= HCINTMSK_DATATGLERR; + if (hc->ep_is_in) { + hc_intr_mask |= HCINTMSK_BBLERR; + } else { + hc_intr_mask |= HCINTMSK_NAK; + hc_intr_mask |= HCINTMSK_NYET; + if (hc->do_ping) + hc_intr_mask |= HCINTMSK_ACK; + } + + if (hc->do_split) { + hc_intr_mask |= HCINTMSK_NAK; + if (hc->complete_split) + hc_intr_mask |= HCINTMSK_NYET; + else + hc_intr_mask |= HCINTMSK_ACK; + } + + if (hc->error_state) + hc_intr_mask |= HCINTMSK_ACK; + break; + case DWC2_EP_TYPE_INTR: + dev_dbg(hcd->dev, "intr\n"); + hc_intr_mask |= HCINTMSK_XFERCOMPL; + hc_intr_mask |= HCINTMSK_NAK; + hc_intr_mask |= HCINTMSK_STALL; + hc_intr_mask |= HCINTMSK_XACTERR; + hc_intr_mask |= HCINTMSK_DATATGLERR; + hc_intr_mask |= HCINTMSK_FRMOVRUN; + + if (hc->ep_is_in) + hc_intr_mask |= HCINTMSK_BBLERR; + if (hc->error_state) + hc_intr_mask |= HCINTMSK_ACK; + if (hc->do_split) { + if (hc->complete_split) + hc_intr_mask |= HCINTMSK_NYET; + else + hc_intr_mask |= HCINTMSK_ACK; + } + break; + case DWC2_EP_TYPE_ISOC: + dev_dbg(hcd->dev, "isoc\n"); + hc_intr_mask |= HCINTMSK_XFERCOMPL; + hc_intr_mask |= HCINTMSK_FRMOVRUN; + hc_intr_mask |= HCINTMSK_ACK; + + if (hc->ep_is_in) { + hc_intr_mask |= HCINTMSK_XACTERR; + hc_intr_mask |= HCINTMSK_BBLERR; + } + break; + default: + dev_err(hcd->dev, "## Unknown EP type ##\n"); + break; + } + } + + writel(hc_intr_mask, hcd->regs + HCINTMSK(hc_num)); + dev_dbg(hcd->dev, "set HCINTMSK to %08x\n", hc_intr_mask); + + /* Enable the top level host channel interrupt */ + intr_enable = 1 << hc_num; + writel(intr_enable, hcd->regs + HAINTMSK); + dev_dbg(hcd->dev, "set HAINTMSK to %08x\n", intr_enable); + + /* Make sure host channel interrupts are enabled */ + gintmsk = readl(hcd->regs + GINTMSK); + gintmsk |= GINTSTS_HChInt; + writel(gintmsk, hcd->regs + GINTMSK); + dev_dbg(hcd->dev, "set GINTMSK to %08x\n", gintmsk); + + /* + * Program the HCCHARn register with the endpoint characteristics for + * the current transfer + */ + hcchar = hc->dev_addr << HCCHAR_DEVADDR_SHIFT & HCCHAR_DEVADDR_MASK; + hcchar |= hc->ep_num << HCCHAR_EPNUM_SHIFT & HCCHAR_EPNUM_MASK; + hcchar |= hc->ep_is_in << HCCHAR_EPDIR_SHIFT & HCCHAR_EPDIR; + if (hc->speed == DWC2_EP_SPEED_LOW) + hcchar |= HCCHAR_LSPDDEV; + hcchar |= hc->ep_type << HCCHAR_EPTYPE_SHIFT & HCCHAR_EPTYPE_MASK; + hcchar |= hc->max_packet << HCCHAR_MPS_SHIFT & HCCHAR_MPS_MASK; + writel(hcchar, hcd->regs + HCCHAR(hc_num)); + dev_dbg(hcd->dev, "set HCCHAR(%d) to %08x\n", hc_num, hcchar); + + dev_dbg(hcd->dev, "%s: Channel %d\n", __func__, hc_num); + dev_dbg(hcd->dev, " Dev Addr: %d\n", + hcchar >> HCCHAR_DEVADDR_SHIFT & + HCCHAR_DEVADDR_MASK >> HCCHAR_DEVADDR_SHIFT); + dev_dbg(hcd->dev, " Ep Num: %d\n", + hcchar >> HCCHAR_EPNUM_SHIFT & + 
HCCHAR_EPNUM_MASK >> HCCHAR_EPNUM_SHIFT); + dev_dbg(hcd->dev, " Is In: %d\n", !!(hcchar & HCCHAR_EPDIR)); + dev_dbg(hcd->dev, " Is Low Speed: %d\n", + !!(hcchar & HCCHAR_LSPDDEV)); + dev_dbg(hcd->dev, " Ep Type: %d\n", + hcchar >> HCCHAR_EPTYPE_SHIFT & + HCCHAR_EPTYPE_MASK >> HCCHAR_EPTYPE_SHIFT); + dev_dbg(hcd->dev, " Max Pkt: %d\n", + hcchar >> HCCHAR_MPS_SHIFT & + HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT); + dev_dbg(hcd->dev, " Multi Cnt: %d\n", + hcchar >> HCCHAR_MULTICNT_SHIFT & + HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT); + + /* Program the HCSPLT register for SPLITs */ + if (hc->do_split) { + dev_dbg(hcd->dev, "Programming HC %d with split --> %s\n", + hc_num, hc->complete_split ? "CSPLIT" : "SSPLIT"); + hcsplt |= hc->complete_split << HCSPLT_COMPSPLT_SHIFT; + hcsplt |= hc->xact_pos << HCSPLT_XACTPOS_SHIFT & + HCSPLT_XACTPOS_MASK; + hcsplt |= hc->hub_addr << HCSPLT_HUBADDR_SHIFT & + HCSPLT_HUBADDR_MASK; + hcsplt |= hc->port_addr << HCSPLT_PRTADDR_SHIFT & + HCSPLT_PRTADDR_MASK; + dev_dbg(hcd->dev, " comp split %d\n", hc->complete_split); + dev_dbg(hcd->dev, " xact pos %d\n", hc->xact_pos); + dev_dbg(hcd->dev, " hub addr %d\n", hc->hub_addr); + dev_dbg(hcd->dev, " port addr %d\n", hc->port_addr); + dev_dbg(hcd->dev, " is_in %d\n", hc->ep_is_in); + dev_dbg(hcd->dev, " Max Pkt %d\n", + hcchar >> HCCHAR_MPS_SHIFT & + HCCHAR_MPS_MASK >> HCCHAR_MPS_SHIFT); + dev_dbg(hcd->dev, " xferlen %d\n", hc->xfer_len); + } + + writel(hcsplt, hcd->regs + HCSPLT(hc_num)); +} + +/** + * dwc2_hc_halt() - Attempts to halt a host channel + * + * @hcd: Controller register interface + * @hc: Host channel to halt + * @halt_status: Reason for halting the channel + * + * This function should only be called in Slave mode or to abort a transfer in + * either Slave mode or DMA mode. Under normal circumstances in DMA mode, the + * controller halts the channel when the transfer is complete or a condition + * occurs that requires application intervention. + * + * In slave mode, checks for a free request queue entry, then sets the Channel + * Enable and Channel Disable bits of the Host Channel Characteristics + * register of the specified channel to intiate the halt. If there is no free + * request queue entry, sets only the Channel Disable bit of the HCCHARn + * register to flush requests for this channel. In the latter case, sets a + * flag to indicate that the host channel needs to be halted when a request + * queue slot is open. + * + * In DMA mode, always sets the Channel Enable and Channel Disable bits of the + * HCCHARn register. The controller ensures there is space in the request + * queue before submitting the halt request. + * + * Some time may elapse before the core flushes any posted requests for this + * host channel and halts. The Channel Halted interrupt handler completes the + * deactivation of the host channel. + */ +void dwc2_hc_halt(struct dwc2_hcd *hcd, struct dwc2_hc *hc, + enum dwc2_halt_status halt_status) +{ + u32 nptxsts; + u32 hptxsts; + u32 hcchar; + + dev_dbg(hcd->dev, "%s()\n", __func__); + if (halt_status == DWC2_HC_XFER_NO_HALT_STATUS) + dev_err(hcd->dev, "!!! halt_status = %d !!!\n", halt_status); + + if (halt_status == DWC2_HC_XFER_URB_DEQUEUE || + halt_status == DWC2_HC_XFER_AHB_ERR) { + /* + * Disable all channel interrupts except Ch Halted. The QTD + * and QH state associated with this transfer has been cleared + * (in the case of URB_DEQUEUE), so the channel needs to be + * shut down carefully to prevent crashes. 
+ */ + u32 hcintmsk = HCINTMSK_CHHLTD; + + dev_dbg(hcd->dev, "dequeue/error\n"); + writel(hcintmsk, hcd->regs + HCINTMSK(hc->hc_num)); + + /* + * Make sure no other interrupts besides halt are currently + * pending. Handling another interrupt could cause a crash due + * to the QTD and QH state. + */ + writel(~hcintmsk, hcd->regs + HCINT(hc->hc_num)); + + /* + * Make sure the halt status is set to URB_DEQUEUE or AHB_ERR + * even if the channel was already halted for some other + * reason + */ + hc->halt_status = halt_status; + + hcchar = readl(hcd->regs + HCCHAR(hc->hc_num)); + if (!(hcchar & HCCHAR_CHENA)) { + /* + * The channel is either already halted or it hasn't + * started yet. In DMA mode, the transfer may halt if + * it finishes normally or a condition occurs that + * requires driver intervention. Don't want to halt + * the channel again. In either Slave or DMA mode, + * it's possible that the transfer has been assigned + * to a channel, but not started yet when an URB is + * dequeued. Don't want to halt a channel that hasn't + * started yet. + */ + return; + } + } + if (hc->halt_pending) { + /* + * A halt has already been issued for this channel. This might + * happen when a transfer is aborted by a higher level in + * the stack. + */ +#ifdef DEBUG + dev_info(hcd->dev, + "*** %s: Channel %d, hc->halt_pending already set ***\n", + __func__, hc->hc_num); + +#endif + return; + } + + hcchar = readl(hcd->regs + HCCHAR(hc->hc_num)); + + /* No need to set the bit in DDMA for disabling the channel */ + /* TODO check it everywhere channel is disabled */ + if (hcd->core_params->dma_desc_enable <= 0) { + dev_dbg(hcd->dev, "desc DMA disabled\n"); + hcchar |= HCCHAR_CHENA; + } else + dev_dbg(hcd->dev, "desc DMA enabled\n"); + hcchar |= HCCHAR_CHDIS; + + if (!hcd->dma_enable) { + dev_dbg(hcd->dev, "DMA not enabled\n"); + hcchar |= HCCHAR_CHENA; + + /* Check for space in the request queue to issue the halt */ + if (hc->ep_type == DWC2_EP_TYPE_CONTROL || + hc->ep_type == DWC2_EP_TYPE_BULK) { + dev_dbg(hcd->dev, "control/bulk\n"); + nptxsts = readl(hcd->regs + GNPTXSTS); + if ((nptxsts & TXSTS_QSPCAVAIL_MASK) == 0) { + dev_dbg(hcd->dev, "Disabling channel\n"); + hcchar &= ~HCCHAR_CHENA; + } + } else { + dev_dbg(hcd->dev, "isoc/intr\n"); + hptxsts = readl(hcd->regs + HPTXSTS); + if ((hptxsts & TXSTS_QSPCAVAIL_MASK) == 0 || + hcd->queuing_high_bandwidth) { + dev_dbg(hcd->dev, "Disabling channel\n"); + hcchar &= ~HCCHAR_CHENA; + } + } + } else + dev_dbg(hcd->dev, "DMA enabled\n"); + + writel(hcchar, hcd->regs + HCCHAR(hc->hc_num)); + hc->halt_status = halt_status; + + if (hcchar & HCCHAR_CHENA) { + dev_dbg(hcd->dev, "Channel enabled\n"); + hc->halt_pending = 1; + hc->halt_on_queue = 0; + } else { + dev_dbg(hcd->dev, "Channel disabled\n"); + hc->halt_on_queue = 1; + } + + dev_dbg(hcd->dev, "%s: Channel %d\n", __func__, hc->hc_num); + dev_dbg(hcd->dev, " hcchar: 0x%08x\n", hcchar); + dev_dbg(hcd->dev, " halt_pending: %d\n", hc->halt_pending); + dev_dbg(hcd->dev, " halt_on_queue: %d\n", hc->halt_on_queue); + dev_dbg(hcd->dev, " halt_status: %d\n", hc->halt_status); +} + +/** + * dwc2_hc_cleanup() - Clears the transfer state for a host channel + * + * @hcd: Programming view of DWC_otg controller + * @hc: Identifies the host channel to clean up + * + * This function is normally called after a transfer is done and the host + * channel is being released + */ +void dwc2_hc_cleanup(struct dwc2_hcd *hcd, struct dwc2_hc *hc) +{ + hc->xfer_started = 0; + + /* + * Clear channel interrupt enables and any unhandled 
channel interrupt + * conditions + */ + writel(0, hcd->regs + HCINTMSK(hc->hc_num)); + writel(0xffffffff, hcd->regs + HCINT(hc->hc_num)); +} + +/** + * hc_set_even_odd_frame() - Sets the channel property that indicates in which + * frame a periodic transfer should occur + * + * @hcd: Programming view of DWC_otg controller + * @hc: Identifies the host channel to set up and its properties + * @hcchar: Current value of the HCCHAR register for the specified host channel + * + * This function has no effect on non-periodic transfers + */ +static void hc_set_even_odd_frame(struct dwc2_hcd *hcd, struct dwc2_hc *hc, + u32 *hcchar) +{ + if (hc->ep_type == DWC2_EP_TYPE_INTR || + hc->ep_type == DWC2_EP_TYPE_ISOC) { + u32 hfnum, frnum; + + hfnum = readl(hcd->regs + HFNUM); + frnum = hfnum >> HFNUM_FRNUM_SHIFT & + HFNUM_FRNUM_MASK >> HFNUM_FRNUM_SHIFT; + + /* 1 if _next_ frame is odd, 0 if it's even */ + if (frnum & 0x1) + *hcchar |= HCCHAR_ODDFRM; + } +} + +static void set_pid_isoc(struct dwc2_hc *hc) +{ + /* Set up the initial PID for the transfer */ + if (hc->speed == DWC2_EP_SPEED_HIGH) { + if (hc->ep_is_in) { + if (hc->multi_count == 1) + hc->data_pid_start = DWC2_HC_PID_DATA0; + else if (hc->multi_count == 2) + hc->data_pid_start = DWC2_HC_PID_DATA1; + else + hc->data_pid_start = DWC2_HC_PID_DATA2; + } else { + if (hc->multi_count == 1) + hc->data_pid_start = DWC2_HC_PID_DATA0; + else + hc->data_pid_start = DWC2_HC_PID_MDATA; + } + } else { + hc->data_pid_start = DWC2_HC_PID_DATA0; + } +} + +/** + * dwc2_hc_start_transfer() - Does the setup for a data transfer for a host + * channel and starts the transfer + * + * @hcd: Programming view of DWC_otg controller + * @hc: Information needed to initialize the host channel. The xfer_len value + * may be reduced to accommodate the max widths of the XferSize and + * PktCnt fields in the HCTSIZn register. The multi_count value may be + * changed to reflect the final xfer_len value. + * + * This function may be called in either Slave mode or DMA mode. In Slave mode, + * the caller must ensure that there is sufficient space in the request queue + * and Tx Data FIFO. + * + * For an OUT transfer in Slave mode, it loads a data packet into the + * appropriate FIFO. If necessary, additional data packets are loaded in the + * Host ISR. + * + * For an IN transfer in Slave mode, a data packet is requested. The data + * packets are unloaded from the Rx FIFO in the Host ISR. If necessary, + * additional data packets are requested in the Host ISR. + * + * For a PING transfer in Slave mode, the Do Ping bit is set in the HCTSIZ + * register along with a packet count of 1 and the channel is enabled. This + * causes a single PING transaction to occur. Other fields in HCTSIZ are + * simply set to 0 since no data transfer occurs in this case. + * + * For a PING transfer in DMA mode, the HCTSIZ register is initialized with + * all the information required to perform the subsequent data transfer. In + * addition, the Do Ping bit is set in the HCTSIZ register. In this case, the + * controller performs the entire PING protocol, then starts the data + * transfer. 
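 *
 * A minimal caller-side sketch (illustrative only; the field names are the
 * struct dwc2_hc members defined in core.h in this patch, and buf/len are
 * placeholders), e.g. for a bulk OUT transfer:
 *
 *	hc->ep_type = DWC2_EP_TYPE_BULK;
 *	hc->ep_is_in = 0;
 *	hc->max_packet = 512;
 *	hc->data_pid_start = DWC2_HC_PID_DATA0;
 *	hc->xfer_buff = buf;
 *	hc->xfer_len = len;
 *	hc->xfer_count = 0;
 *	hc->do_ping = 0;
 *	hc->do_split = 0;
 *
 *	dwc2_hc_init(hcd, hc);
 *	dwc2_hc_start_transfer(hcd, hc);
 *
 * In Slave mode the caller remains responsible for request queue and Tx
 * FIFO space, as noted above.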
+ */ +void dwc2_hc_start_transfer(struct dwc2_hcd *hcd, struct dwc2_hc *hc) +{ + u32 max_hc_xfer_size = hcd->core_params->max_transfer_size; + u16 max_hc_pkt_count = hcd->core_params->max_packet_count; + u32 hcchar; + u32 hctsiz = 0; + u16 num_packets; + + dev_dbg(hcd->dev, "%s()\n", __func__); + if (hc->do_ping) { + if (!hcd->dma_enable) { + dev_dbg(hcd->dev, "ping, no DMA\n"); + dwc2_hc_do_ping(hcd, hc); + hc->xfer_started = 1; + return; + } else { + dev_dbg(hcd->dev, "ping, DMA\n"); + hctsiz |= TSIZ_DOPNG; + } + } + + if (hc->do_split) { + dev_dbg(hcd->dev, "split\n"); + num_packets = 1; + + if (hc->complete_split && !hc->ep_is_in) + /* + * For CSPLIT OUT Transfer, set the size to 0 so the + * core doesn't expect any data written to the FIFO + */ + hc->xfer_len = 0; + else if (hc->ep_is_in || hc->xfer_len > hc->max_packet) + hc->xfer_len = hc->max_packet; + else if (!hc->ep_is_in && hc->xfer_len > 188) + hc->xfer_len = 188; + + hctsiz |= hc->xfer_len << TSIZ_XFERSIZE_SHIFT & + TSIZ_XFERSIZE_MASK; + } else { + dev_dbg(hcd->dev, "no split\n"); + /* + * Ensure that the transfer length and packet count will fit + * in the widths allocated for them in the HCTSIZn register + */ + if (hc->ep_type == DWC2_EP_TYPE_INTR || + hc->ep_type == DWC2_EP_TYPE_ISOC) { + /* + * Make sure the transfer size is no larger than one + * (micro)frame's worth of data. (A check was done + * when the periodic transfer was accepted to ensure + * that a (micro)frame's worth of data can be + * programmed into a channel.) + */ + u32 max_periodic_len = hc->multi_count * hc->max_packet; + + if (hc->xfer_len > max_periodic_len) + hc->xfer_len = max_periodic_len; + } else if (hc->xfer_len > max_hc_xfer_size) { + /* + * Make sure that xfer_len is a multiple of max packet + * size + */ + hc->xfer_len = max_hc_xfer_size - hc->max_packet + 1; + } + + if (hc->xfer_len > 0) { + num_packets = (hc->xfer_len + hc->max_packet - 1) / + hc->max_packet; + if (num_packets > max_hc_pkt_count) { + num_packets = max_hc_pkt_count; + hc->xfer_len = num_packets * hc->max_packet; + } + } else { + /* Need 1 packet for transfer length of 0 */ + num_packets = 1; + } + + if (hc->ep_is_in) + /* + * Always program an integral # of max packets for IN + * transfers + */ + hc->xfer_len = num_packets * hc->max_packet; + + if (hc->ep_type == DWC2_EP_TYPE_INTR || + hc->ep_type == DWC2_EP_TYPE_ISOC) + /* + * Make sure that the multi_count field matches the + * actual transfer length + */ + hc->multi_count = num_packets; + + if (hc->ep_type == DWC2_EP_TYPE_ISOC) + set_pid_isoc(hc); + + hctsiz |= hc->xfer_len << TSIZ_XFERSIZE_SHIFT & + TSIZ_XFERSIZE_MASK; + } + + hc->start_pkt_count = num_packets; + hctsiz |= num_packets << TSIZ_PKTCNT_SHIFT & TSIZ_PKTCNT_MASK; + hctsiz |= hc->data_pid_start << TSIZ_SC_MC_PID_SHIFT & + TSIZ_SC_MC_PID_MASK; + writel(hctsiz, hcd->regs + HCTSIZ(hc->hc_num)); + dev_dbg(hcd->dev, "Wrote %08x to HCTSIZ(%d)\n", hctsiz, hc->hc_num); + + dev_dbg(hcd->dev, "%s: Channel %d\n", __func__, hc->hc_num); + dev_dbg(hcd->dev, " Xfer Size: %d\n", + hctsiz >> TSIZ_XFERSIZE_SHIFT & + TSIZ_XFERSIZE_MASK >> TSIZ_XFERSIZE_SHIFT); + dev_dbg(hcd->dev, " Num Pkts: %d\n", + hctsiz >> TSIZ_PKTCNT_SHIFT & + TSIZ_PKTCNT_MASK >> TSIZ_PKTCNT_SHIFT); + dev_dbg(hcd->dev, " Start PID: %d\n", + hctsiz >> TSIZ_SC_MC_PID_SHIFT & + TSIZ_SC_MC_PID_MASK >> TSIZ_SC_MC_PID_SHIFT); + + if (hcd->dma_enable) { + dma_addr_t dma_addr; + + if (hc->align_buff) { + dev_dbg(hcd->dev, "align_buf\n"); + dma_addr = hc->align_buff; + } else { + dma_addr = (unsigned 
long)hc->xfer_buff & 0xffffffff; + } + writel((u32)dma_addr, hcd->regs + HCDMA(hc->hc_num)); + dev_dbg(hcd->dev, "Wrote %08lx to HCDMA(%d)\n", + (unsigned long)dma_addr, hc->hc_num); + } + + /* Start the split */ + if (hc->do_split) { + u32 hcsplt; + + hcsplt = readl(hcd->regs + HCSPLT(hc->hc_num)); + hcsplt |= HCSPLT_SPLTENA; + writel(hcsplt, hcd->regs + HCSPLT(hc->hc_num)); + } + + hcchar = readl(hcd->regs + HCCHAR(hc->hc_num)); + hcchar &= ~HCCHAR_MULTICNT_MASK; + hcchar |= hc->multi_count << HCCHAR_MULTICNT_SHIFT & + HCCHAR_MULTICNT_MASK; + hc_set_even_odd_frame(hcd, hc, &hcchar); + + if (hcchar & HCCHAR_CHDIS) + dev_warn(hcd->dev, "%s: chdis set, channel %d, hcchar 0x%08x\n", + __func__, hc->hc_num, hcchar); + + /* Set host channel enable after all other setup is complete */ + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + + dev_dbg(hcd->dev, " Multi Cnt: %d\n", + hcchar >> HCCHAR_MULTICNT_SHIFT & + HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT); + + writel(hcchar, hcd->regs + HCCHAR(hc->hc_num)); + dev_dbg(hcd->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, hc->hc_num); + + hc->xfer_started = 1; + hc->requests++; + + if (!hcd->dma_enable && !hc->ep_is_in && hc->xfer_len > 0) + /* Load OUT packet into the appropriate Tx FIFO */ + dwc2_hc_write_packet(hcd, hc); +} + +/** + * dwc2_hc_start_transfer_ddma() - Does the setup for a data transfer for a + * host channel and starts the transfer in Descriptor DMA mode + * + * @hcd: Programming view of DWC_otg controller + * @hc: Information needed to initialize the host channel + * + * Initializes HCTSIZ register. For a PING transfer the Do Ping bit is set. + * Sets PID and NTD values. For periodic transfers initializes SCHED_INFO field + * with micro-frame bitmap. + * + * Initializes HCDMA register with descriptor list address and CTD value then + * starts the transfer via enabling the channel. 
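 *
 * Compared with dwc2_hc_start_transfer(), the caller supplies the
 * Descriptor DMA specific fields instead of a transfer length. A sketch
 * (illustrative only; uses the struct dwc2_hc members from core.h, with
 * placeholder values on the right):
 *
 *	hc->desc_list_addr = desc_list_dma;	(DMA address of the list)
 *	hc->ntd = n_desc;			(number of descriptors)
 *	hc->schinfo = uframe_schedule;		(HS interrupt EPs only)
 *
 *	dwc2_hc_start_transfer_ddma(hcd, hc);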
+ */ +void dwc2_hc_start_transfer_ddma(struct dwc2_hcd *hcd, struct dwc2_hc *hc) +{ + u32 hcchar; + u32 hcdma; + u32 hctsiz = 0; + + if (hc->do_ping) + hctsiz |= TSIZ_DOPNG; + + if (hc->ep_type == DWC2_EP_TYPE_ISOC) + set_pid_isoc(hc); + + /* Packet Count and Xfer Size are not used in Descriptor DMA mode */ + hctsiz |= hc->data_pid_start << TSIZ_SC_MC_PID_SHIFT & + TSIZ_SC_MC_PID_MASK; + + /* 0 - 1 descriptor, 1 - 2 descriptors, etc */ + hctsiz |= (hc->ntd - 1) << TSIZ_NTD_SHIFT & TSIZ_NTD_MASK; + + /* Non-zero only for high-speed interrupt endpoints */ + hctsiz |= hc->schinfo << TSIZ_SCHINFO_SHIFT & TSIZ_SCHINFO_MASK; + + dev_dbg(hcd->dev, "%s: Channel %d\n", __func__, hc->hc_num); + dev_dbg(hcd->dev, " Start PID: %d\n", hc->data_pid_start); + dev_dbg(hcd->dev, " NTD: %d\n", hc->ntd - 1); + + writel(hctsiz, hcd->regs + HCTSIZ(hc->hc_num)); + + hcdma = (u32)hc->desc_list_addr & HCDMA_DMA_ADDR_MASK; + + /* Always start from first descriptor */ + hcdma &= ~HCDMA_CTD_MASK; + writel(hcdma, hcd->regs + HCDMA(hc->hc_num)); + dev_dbg(hcd->dev, "Wrote %08x to HCDMA(%d)\n", hcdma, hc->hc_num); + + hcchar = readl(hcd->regs + HCCHAR(hc->hc_num)); + hcchar &= ~HCCHAR_MULTICNT_MASK; + hcchar |= hc->multi_count << HCCHAR_MULTICNT_SHIFT & + HCCHAR_MULTICNT_MASK; + + if (hcchar & HCCHAR_CHDIS) + dev_warn(hcd->dev, "%s: chdis set, channel %d, hcchar 0x%08x\n", + __func__, hc->hc_num, hcchar); + + /* Set host channel enable after all other setup is complete */ + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + + dev_dbg(hcd->dev, " Multi Cnt: %d\n", + hcchar >> HCCHAR_MULTICNT_SHIFT & + HCCHAR_MULTICNT_MASK >> HCCHAR_MULTICNT_SHIFT); + + writel(hcchar, hcd->regs + HCCHAR(hc->hc_num)); + dev_dbg(hcd->dev, "Wrote %08x to HCCHAR(%d)\n", hcchar, hc->hc_num); + + hc->xfer_started = 1; + hc->requests++; +} + +/** + * dwc2_hc_continue_transfer() - Continues a data transfer that was started by + * a previous call to dwc2_hc_start_transfer() + * + * @hcd: Programming view of DWC_otg controller + * @hc: Information needed to initialize the host channel + * + * The caller must ensure there is sufficient space in the request queue and Tx + * Data FIFO. This function should only be called in Slave mode. In DMA mode, + * the controller acts autonomously to complete transfers programmed to a host + * channel. + * + * For an OUT transfer, a new data packet is loaded into the appropriate FIFO + * if there is any data remaining to be queued. For an IN transfer, another + * data packet is always requested. For the SETUP phase of a control transfer, + * this function does nothing. + * + * Return: 1 if a new request is queued, 0 if no more requests are required + * for this transfer + */ +int dwc2_hc_continue_transfer(struct dwc2_hcd *hcd, struct dwc2_hc *hc) +{ + dev_dbg(hcd->dev, "%s: Channel %d\n", __func__, hc->hc_num); + + if (hc->do_split) { + /* SPLITs always queue just once per channel */ + return 0; + } else if (hc->data_pid_start == DWC2_HC_PID_SETUP) { + /* SETUPs are queued only once since they can't be NAK'd */ + return 0; + } else if (hc->ep_is_in) { + /* + * Always queue another request for other IN transfers. If + * back-to-back INs are issued and NAKs are received for both, + * the driver may still be processing the first NAK when the + * second NAK is received. When the interrupt handler clears + * the NAK interrupt for the first NAK, the second NAK will + * not be seen. So we can't depend on the NAK interrupt + * handler to requeue a NAK'd request. 
Instead, IN requests + * are issued each time this function is called. When the + * transfer completes, the extra requests for the channel will + * be flushed. + */ + u32 hcchar = readl(hcd->regs + HCCHAR(hc->hc_num)); + + hc_set_even_odd_frame(hcd, hc, &hcchar); + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + dev_dbg(hcd->dev, " IN xfer: hcchar = 0x%08x\n", hcchar); + writel(hcchar, hcd->regs + HCCHAR(hc->hc_num)); + hc->requests++; + return 1; + } else { + /* OUT transfers */ + if (hc->xfer_count < hc->xfer_len) { + if (hc->ep_type == DWC2_EP_TYPE_INTR || + hc->ep_type == DWC2_EP_TYPE_ISOC) { + u32 hcchar = readl(hcd->regs + + HCCHAR(hc->hc_num)); + + hc_set_even_odd_frame(hcd, hc, &hcchar); + } + + /* Load OUT packet into the appropriate Tx FIFO */ + dwc2_hc_write_packet(hcd, hc); + hc->requests++; + return 1; + } else { + return 0; + } + } +} + +/** + * dwc2_hc_do_ping() - Starts a PING transfer + * + * @hcd: Programming view of DWC_otg controller + * @hc: Information needed to initialize the host channel + * + * This function should only be called in Slave mode. The Do Ping bit is set in + * the HCTSIZ register, then the channel is enabled. + */ +void dwc2_hc_do_ping(struct dwc2_hcd *hcd, struct dwc2_hc *hc) +{ + u32 hcchar; + u32 hctsiz; + + dev_dbg(hcd->dev, "%s: Channel %d\n", __func__, hc->hc_num); + + hctsiz = TSIZ_DOPNG; + hctsiz |= 1 << TSIZ_PKTCNT_SHIFT; + writel(hctsiz, hcd->regs + HCTSIZ(hc->hc_num)); + + hcchar = readl(hcd->regs + HCCHAR(hc->hc_num)); + hcchar |= HCCHAR_CHENA; + hcchar &= ~HCCHAR_CHDIS; + writel(hcchar, hcd->regs + HCCHAR(hc->hc_num)); +} + +/** + * dwc2_hc_write_packet() - Writes a packet into the Tx FIFO associated with + * the Host Channel + * + * @hcd: Programming view of DWC_otg controller + * @hc: Information needed to initialize the host channel + * + * This function should only be called in Slave mode. For a channel associated + * with a non-periodic EP, the non-periodic Tx FIFO is written. For a channel + * associated with a periodic EP, the periodic Tx FIFO is written. + * + * Upon return the xfer_buff and xfer_count fields in hc are incremented by + * the number of bytes written to the Tx FIFO. 
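 *
 * In Slave mode the Host ISR repeats this call, one packet at a time, as
 * the corresponding Tx FIFO empty interrupt fires. A simplified sketch of
 * that continuation (illustrative only; it omits the request queue and
 * FIFO space checks the ISR must perform):
 *
 *	if (!hc->ep_is_in && hc->xfer_count < hc->xfer_len)
 *		dwc2_hc_write_packet(hcd, hc);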
+ */ +void dwc2_hc_write_packet(struct dwc2_hcd *hcd, struct dwc2_hc *hc) +{ + u32 i; + u32 remaining_count; + u32 byte_count; + u32 dword_count; + u32 __iomem *data_fifo; + u32 *data_buff = (u32 *)hc->xfer_buff; + + dev_dbg(hcd->dev, "%s()\n", __func__); + + data_fifo = (u32 __iomem *)(hcd->regs + HCFIFO(hc->hc_num)); + + remaining_count = hc->xfer_len - hc->xfer_count; + if (remaining_count > hc->max_packet) + byte_count = hc->max_packet; + else + byte_count = remaining_count; + + dword_count = (byte_count + 3) / 4; + + if (((unsigned long)data_buff & 0x3) == 0) { + /* xfer_buff is DWORD aligned */ + for (i = 0; i < dword_count; i++, data_buff++) + writel(*data_buff, data_fifo); + } else { + /* xfer_buff is not DWORD aligned */ + for (i = 0; i < dword_count; i++, data_buff++) { + u32 data = data_buff[0] | data_buff[1] << 8 | + data_buff[2] << 16 | data_buff[3] << 24; + writel(data, data_fifo); + } + } + + hc->xfer_count += byte_count; + hc->xfer_buff += byte_count; +} + +/** + * dwc2_calc_frame_interval() - Calculates the correct frame Interval value for + * the HFIR register according to PHY type and speed + * + * @hcd: Programming view of DWC_otg controller + * + * NOTE: The caller can modify the value of the HFIR register only after the + * Port Enable bit of the Host Port Control and Status register (HPRT.EnaPort) + * has been set + */ +u32 dwc2_calc_frame_interval(struct dwc2_hcd *hcd) +{ + u32 usbcfg; + u32 hwcfg2; + u32 hprt0; + int clock = 60; /* default value */ + + usbcfg = readl(hcd->regs + GUSBCFG); + hwcfg2 = readl(hcd->regs + GHWCFG2); + hprt0 = readl(hcd->regs + HPRT0); + + if (!(usbcfg & GUSBCFG_PHYSel) && (usbcfg & GUSBCFG_ULPIUTMISel) && + !(usbcfg & GUSBCFG_PHYIf16)) + clock = 60; + if ((usbcfg & GUSBCFG_PHYSel) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) == + GHWCFG2_FS_PHY_TYPE_SHARED_ULPI) + clock = 48; + if (!(usbcfg & GUSBCFG_PHYLPClkSel) && !(usbcfg & GUSBCFG_PHYSel) && + !(usbcfg & GUSBCFG_ULPIUTMISel) && (usbcfg & GUSBCFG_PHYIf16)) + clock = 30; + if (!(usbcfg & GUSBCFG_PHYLPClkSel) && !(usbcfg & GUSBCFG_PHYSel) && + !(usbcfg & GUSBCFG_ULPIUTMISel) && !(usbcfg & GUSBCFG_PHYIf16)) + clock = 60; + if ((usbcfg & GUSBCFG_PHYLPClkSel) && !(usbcfg & GUSBCFG_PHYSel) && + !(usbcfg & GUSBCFG_ULPIUTMISel) && (usbcfg & GUSBCFG_PHYIf16)) + clock = 48; + if ((usbcfg & GUSBCFG_PHYSel) && !(usbcfg & GUSBCFG_PHYIf16) && + (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) == + GHWCFG2_FS_PHY_TYPE_SHARED_UTMI) + clock = 48; + if ((usbcfg & GUSBCFG_PHYSel) && (hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK) == + GHWCFG2_FS_PHY_TYPE_DEDICATED) + clock = 48; + + if ((hprt0 & HPRT0_SPD_MASK) == 0) + /* High speed case */ + return 125 * clock; + else + /* FS/LS case */ + return 1000 * clock; +} + +/** + * dwc2_read_packet() - Reads a packet from the Rx FIFO into the destination + * buffer + * + * @core_if: Programming view of DWC_otg controller + * @dest: Destination buffer for the packet + * @bytes: Number of bytes to copy to the destination + */ +void dwc2_read_packet(struct dwc2_hcd *hcd, u8 *dest, u16 bytes) +{ + u32 __iomem *fifo = hcd->regs + HCFIFO(0); + u32 *data_buff = (u32 *)dest; + int word_count = (bytes + 3) / 4; + int i; + + /* + * Todo: Account for the case where dest is not dword aligned. This + * requires reading data from the FIFO into a u32 temp buffer, then + * moving it into the data buffer. 
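 *
 * One possible shape for that, shown here only as an illustrative sketch
 * (not implemented by this patch):
 *
 *	u32 tmp;
 *	int remaining = bytes;
 *	int i;
 *
 *	for (i = 0; i < word_count; i++) {
 *		tmp = readl(fifo);
 *		memcpy(dest, &tmp, remaining >= 4 ? 4 : remaining);
 *		dest += 4;
 *		remaining -= 4;
 *	}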
+ */ + + dev_dbg(hcd->dev, "%s(%p,%p,%d)\n", __func__, hcd, dest, bytes); + + for (i = 0; i < word_count; i++, data_buff++) + *data_buff = readl(fifo); +} + +/** + * dwc2_dump_spram() - Reads the SPRAM and prints its content + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_dump_spram(struct dwc2_hcd *hcd) +{ + u8 __iomem *addr, *start_addr, *end_addr; + + dev_info(hcd->dev, "SPRAM Data:\n"); + start_addr = (u8 __iomem *)hcd->regs; + dev_info(hcd->dev, "Base Address: 0x%08lX\n", + (unsigned long)start_addr); + start_addr += 0x00028000; + end_addr = (u8 __iomem *)hcd->regs; + end_addr += 0x000280e0; + + for (addr = start_addr; addr < end_addr; addr += 16) { + dev_info(hcd->dev, + "0x%8lX:\t%2X %2X %2X %2X %2X %2X %2X %2X %2X %2X %2X " + "%2X %2X %2X %2X %2X\n", (unsigned long)addr, + addr[0], addr[1], addr[2], addr[3], addr[4], addr[5], + addr[6], addr[7], addr[8], addr[9], addr[10], addr[11], + addr[12], addr[13], addr[14], addr[15]); + } +} + +/** + * dwc2_dump_host_registers() - Reads the host registers and prints them + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_dump_host_registers(struct dwc2_hcd *hcd) +{ + int i; + u32 __iomem *addr; + + dev_info(hcd->dev, "Host Global Registers\n"); + addr = hcd->regs + HCFG; + dev_info(hcd->dev, "HCFG @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HFIR; + dev_info(hcd->dev, "HFIR @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HFNUM; + dev_info(hcd->dev, "HFNUM @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HPTXSTS; + dev_info(hcd->dev, "HPTXSTS @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HAINT; + dev_info(hcd->dev, "HAINT @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HAINTMSK; + dev_info(hcd->dev, "HAINTMSK @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + if (hcd->dma_desc_enable) { + addr = hcd->regs + HFLBADDR; + dev_info(hcd->dev, "HFLBADDR @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + } + + addr = hcd->regs + HPRT0; + dev_info(hcd->dev, "HPRT0 @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + + for (i = 0; i < hcd->core_params->host_channels; i++) { + dev_info(hcd->dev, "Host Channel %d Specific Registers\n", i); + addr = hcd->regs + HCCHAR(i); + dev_info(hcd->dev, "HCCHAR @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HCSPLT(i); + dev_info(hcd->dev, "HCSPLT @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HCINT(i); + dev_info(hcd->dev, "HCINT @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HCINTMSK(i); + dev_info(hcd->dev, "HCINTMSK @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HCTSIZ(i); + dev_info(hcd->dev, "HCTSIZ @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HCDMA(i); + dev_info(hcd->dev, "HCDMA @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + if (hcd->dma_desc_enable) { + addr = hcd->regs + HCDMAB(i); + dev_info(hcd->dev, "HCDMAB @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + } + } +} + +/** + * dwc2_dump_global_registers() - Reads the core global registers and prints + * them + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_dump_global_registers(struct dwc2_hcd *hcd) +{ + int i, ep_num; + char *txfsiz; + u32 __iomem *addr; + + dev_info(hcd->dev, "Core Global Registers\n"); + addr = 
hcd->regs + GOTGCTL; + dev_info(hcd->dev, "GOTGCTL @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GOTGINT; + dev_info(hcd->dev, "GOTGINT @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GAHBCFG; + dev_info(hcd->dev, "GAHBCFG @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GUSBCFG; + dev_info(hcd->dev, "GUSBCFG @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GRSTCTL; + dev_info(hcd->dev, "GRSTCTL @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GINTSTS; + dev_info(hcd->dev, "GINTSTS @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GINTMSK; + dev_info(hcd->dev, "GINTMSK @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GRXSTSR; + dev_info(hcd->dev, "GRXSTSR @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GRXFSIZ; + dev_info(hcd->dev, "GRXFSIZ @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GNPTXFSIZ; + dev_info(hcd->dev, "GNPTXFSIZ @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GNPTXSTS; + dev_info(hcd->dev, "GNPTXSTS @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GI2CCTL; + dev_info(hcd->dev, "GI2CCTL @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GPVNDCTL; + dev_info(hcd->dev, "GPVNDCTL @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GGPIO; + dev_info(hcd->dev, "GGPIO @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GUID; + dev_info(hcd->dev, "GUID @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GSNPSID; + dev_info(hcd->dev, "GSNPSID @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GHWCFG1; + dev_info(hcd->dev, "GHWCFG1 @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GHWCFG2; + dev_info(hcd->dev, "GHWCFG2 @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GHWCFG3; + dev_info(hcd->dev, "GHWCFG3 @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GHWCFG4; + dev_info(hcd->dev, "GHWCFG4 @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GLPMCFG; + dev_info(hcd->dev, "GLPMCFG @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GPWRDN; + dev_info(hcd->dev, "GPWRDN @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + GDFIFOCFG; + dev_info(hcd->dev, "GDFIFOCFG @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + addr = hcd->regs + HPTXFSIZ; + dev_info(hcd->dev, "HPTXFSIZ @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); + + if (hcd->en_multiple_tx_fifo == 0) { + ep_num = hcd->hwcfg4 >> GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT & + GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK >> + GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; + txfsiz = "DPTXFSIZ"; + } else { + ep_num = hcd->hwcfg4 >> GHWCFG4_NUM_IN_EPS_SHIFT & + GHWCFG4_NUM_IN_EPS_MASK >> GHWCFG4_NUM_IN_EPS_SHIFT; + txfsiz = "DIENPTXF"; + } + + for (i = 0; i < ep_num; i++) { + addr = hcd->regs + DPTXFSIZn(i + 1); + dev_info(hcd->dev, "%s[%d] @0x%08lX : 0x%08X\n", txfsiz, i + 1, + (unsigned long)addr, readl(addr)); + } + + addr = hcd->regs + PCGCTL; + dev_info(hcd->dev, "PCGCTL @0x%08lX : 0x%08X\n", + (unsigned long)addr, readl(addr)); +} + +/** + * dwc2_flush_tx_fifo() - Flushes a Tx FIFO + * 
+ * @hcd: Programming view of DWC_otg controller + * @num: Tx FIFO to flush + */ +void dwc2_flush_tx_fifo(struct dwc2_hcd *hcd, const int num) +{ + u32 greset; + int count = 0; + + dev_dbg(hcd->dev, "Flush Tx FIFO %d\n", num); + + greset = GRSTCTL_TxFFlsh; + greset |= num << GRSTCTL_TxFNum_SHIFT & GRSTCTL_TxFNum_MASK; + writel(greset, hcd->regs + GRSTCTL); + + do { + greset = readl(hcd->regs + GRSTCTL); + if (++count > 10000) { + dev_warn(hcd->dev, + "%s() HANG! GRSTCTL=%0x GNPTXSTS=0x%08x\n", + __func__, greset, readl(hcd->regs + GNPTXSTS)); + break; + } + udelay(1); + } while (greset & GRSTCTL_TxFFlsh); + + /* Wait for at least 3 PHY Clocks */ + udelay(1); +} + +/** + * dwc2_flush_rx_fifo() - Flushes the Rx FIFO + * + * @hcd: Programming view of DWC_otg controller + */ +void dwc2_flush_rx_fifo(struct dwc2_hcd *hcd) +{ + u32 greset; + int count = 0; + + dev_dbg(hcd->dev, "%s\n", __func__); + greset = GRSTCTL_RxFFlsh; + writel(greset, hcd->regs + GRSTCTL); + + do { + greset = readl(hcd->regs + GRSTCTL); + if (++count > 10000) { + dev_warn(hcd->dev, "%s() HANG! GRSTCTL=%0x\n", + __func__, greset); + break; + } + udelay(1); + } while (greset & GRSTCTL_RxFFlsh); + + /* Wait for at least 3 PHY Clocks */ + udelay(1); +} + +#define DWC2_PARAM_TEST(a, b, c) ((a) < (b) || (a) > (c)) + +/* Parameter access functions */ +int dwc2_set_param_otg_cap(struct dwc2_hcd *hcd, int val) +{ + int valid = 1; + int retval = 0; + u32 op_mode; + + op_mode = hcd->hwcfg2 & GHWCFG2_OP_MODE_MASK; + + switch (val) { + case DWC2_CAP_PARAM_HNP_SRP_CAPABLE: + if (op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE) + valid = 0; + break; + case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE: + if (op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE && + op_mode != GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE && + op_mode != GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE && + op_mode != GHWCFG2_OP_MODE_SRP_CAPABLE_HOST) + valid = 0; + break; + case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE: + /* always valid */ + break; + default: + valid = 0; + break; + } + + if (!valid) { + dev_err(hcd->dev, + "%d invalid for otg_cap parameter. Check HW configuration.\n", + val); + val = op_mode == GHWCFG2_OP_MODE_HNP_SRP_CAPABLE || + op_mode == GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE || + op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE || + op_mode == GHWCFG2_OP_MODE_SRP_CAPABLE_HOST ? + DWC2_CAP_PARAM_SRP_ONLY_CAPABLE : + DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; + retval = -EINVAL; + } + + hcd->core_params->otg_cap = val; + return retval; +} + +int dwc2_set_param_dma_enable(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + dev_dbg(hcd->dev, "%s(%d)\n", __func__, val); + + if (val > 0 && (hcd->hwcfg2 & GHWCFG2_ARCHITECTURE_MASK) == + GHWCFG2_SLAVE_ONLY_ARCH) { + dev_err(hcd->dev, + "%d invalid for dma_enable parameter. Check HW configuration.\n", + val); + val = 0; + retval = -EINVAL; + } + + hcd->core_params->dma_enable = val; + return retval; +} + +int dwc2_set_param_dma_desc_enable(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (val > 0 && (hcd->core_params->dma_enable <= 0 || + !(hcd->hwcfg4 & GHWCFG4_DESC_DMA))) { + dev_err(hcd->dev, + "%d invalid for dma_desc_enable parameter. 
Check HW configuration.\n", + val); + val = 0; + retval = -EINVAL; + } + + hcd->core_params->dma_desc_enable = val; + return retval; +} + +int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (DWC2_PARAM_TEST(val, 0, 1)) { + dev_err(hcd->dev, + "Wrong value for host_support_fs_low_power\n"); + dev_err(hcd->dev, "host_support_fs_low_power must be 0 or 1\n"); + val = 0; + retval = -EINVAL; + } + + hcd->core_params->host_support_fs_ls_low_power = val; + return retval; +} + +int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (val > 0 && !(hcd->hwcfg2 & GHWCFG2_DYNAMIC_FIFO)) { + dev_err(hcd->dev, + "%d invalid for enable_dynamic_fifo parameter. Check HW configuration.\n", + val); + val = 0; + retval = -EINVAL; + } + + hcd->core_params->enable_dynamic_fifo = val; + return retval; +} + +int dwc2_set_param_host_rx_fifo_size(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (val < 16 || val > readl(hcd->regs + GRXFSIZ)) { + dev_err(hcd->dev, + "%d invalid for host_rx_fifo_size. Check HW configuration.\n", + val); + val = readl(hcd->regs + GRXFSIZ); + retval = -EINVAL; + } + + hcd->core_params->host_rx_fifo_size = val; + return retval; +} + +int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (val < 16 || val > (readl(hcd->regs + GNPTXFSIZ) >> 16 & 0xffff)) { + dev_err(hcd->dev, + "%d invalid for host_nperio_tx_fifo_size. Check HW configuration.\n", + val); + val = readl(hcd->regs + GNPTXFSIZ) >> 16 & 0xffff; + dev_err(hcd->dev, "Setting value to %d\n", val); + retval = -EINVAL; + } + + hcd->core_params->host_nperio_tx_fifo_size = val; + return retval; +} + +int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (val < 16 || val > (hcd->hptxfsiz >> 16)) { + dev_err(hcd->dev, + "%d invalid for host_perio_tx_fifo_size. Check HW configuration.\n", + val); + val = hcd->hptxfsiz >> 16; + dev_err(hcd->dev, "Setting value to %d\n", val); + retval = -EINVAL; + } + + hcd->core_params->host_perio_tx_fifo_size = val; + return retval; +} + +int dwc2_set_param_max_transfer_size(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + int width = hcd->hwcfg3 >> GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT & + GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK >> + GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT; + + if (val < 2047 || val >= (1 << (width + 11))) { + dev_err(hcd->dev, + "%d invalid for max_transfer_size. Check HW configuration.\n", + val); + val = (1 << (width + 11)) - 1; + retval = -EINVAL; + } + + hcd->core_params->max_transfer_size = val; + return retval; +} + +int dwc2_set_param_max_packet_count(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + int width = hcd->hwcfg3 >> GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT & + GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK >> + GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT; + + if (val < 15 || val > (1 << (width + 4))) { + dev_err(hcd->dev, + "%d invalid for max_packet_count. Check HW configuration.\n", + val); + val = (1 << (width + 4)) - 1; + retval = -EINVAL; + } + + hcd->core_params->max_packet_count = val; + return retval; +} + +int dwc2_set_param_host_channels(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + int num_chan = hcd->hwcfg2 >> GHWCFG2_NUM_HOST_CHAN_SHIFT & + GHWCFG2_NUM_HOST_CHAN_MASK >> GHWCFG2_NUM_HOST_CHAN_SHIFT; + + if (val < 1 || val > num_chan + 1) { + dev_err(hcd->dev, + "%d invalid for host_channels. 
Check HW configuration.\n", + val); + val = num_chan + 1; + dev_err(hcd->dev, "Setting value to %d\n", val); + retval = -EINVAL; + } + + hcd->core_params->host_channels = val; + return retval; +} + +int dwc2_set_param_phy_type(struct dwc2_hcd *hcd, int val) +{ +#ifndef NO_FS_PHY_HW_CHECKS + int valid = 0; + u32 hs_phy_type; + u32 fs_phy_type; +#endif + int retval = 0; + + if (DWC2_PARAM_TEST(val, 0, 2)) { + dev_warn(hcd->dev, "Wrong value for phy_type\n"); + dev_warn(hcd->dev, "phy_type must be 0, 1 or 2\n"); + return -EINVAL; + } + +#ifndef NO_FS_PHY_HW_CHECKS + hs_phy_type = hcd->hwcfg2 & GHWCFG2_HS_PHY_TYPE_MASK; + fs_phy_type = hcd->hwcfg2 & GHWCFG2_FS_PHY_TYPE_MASK; + + if (val == DWC_PHY_TYPE_PARAM_UTMI && + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) + valid = 1; + else if (val == DWC_PHY_TYPE_PARAM_ULPI && + (hs_phy_type == GHWCFG2_HS_PHY_TYPE_ULPI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI)) + valid = 1; + else if (val == DWC_PHY_TYPE_PARAM_FS && + fs_phy_type == GHWCFG2_FS_PHY_TYPE_DEDICATED) + valid = 1; + + if (!valid) { + dev_err(hcd->dev, + "%d invalid for phy_type. Check HW configuration.\n", + val); + if (hs_phy_type != GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED) { + if (hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI || + hs_phy_type == GHWCFG2_HS_PHY_TYPE_UTMI_ULPI) + val = DWC_PHY_TYPE_PARAM_UTMI; + else + val = DWC_PHY_TYPE_PARAM_ULPI; + } + retval = -EINVAL; + } +#endif + + hcd->core_params->phy_type = val; + return retval; +} + +static int get_param_phy_type(struct dwc2_hcd *hcd) +{ + return hcd->core_params->phy_type; +} + +int dwc2_set_param_speed(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (DWC2_PARAM_TEST(val, 0, 1)) { + dev_warn(hcd->dev, "Wrong value for speed parameter\n"); + dev_warn(hcd->dev, "max_speed parameter must be 0 or 1\n"); + return -EINVAL; + } + if (val == 0 && get_param_phy_type(hcd) == DWC_PHY_TYPE_PARAM_FS) { + dev_err(hcd->dev, + "%d invalid for speed parameter. Check HW configuration.\n", + val); + val = get_param_phy_type(hcd) == DWC_PHY_TYPE_PARAM_FS ? 1 : 0; + retval = -EINVAL; + } + hcd->core_params->speed = val; + return retval; +} + +int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (DWC2_PARAM_TEST(val, 0, 1)) { + dev_warn(hcd->dev, + "Wrong value for host_ls_low_power_phy_clk parameter\n"); + dev_warn(hcd->dev, + "host_ls_low_power_phy_clk must be 0 or 1\n"); + return -EINVAL; + } + + if (val == DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ && + get_param_phy_type(hcd) == DWC_PHY_TYPE_PARAM_FS) { + dev_err(hcd->dev, + "%d invalid for host_ls_low_power_phy_clk. Check HW configuration.\n", + val); + val = get_param_phy_type(hcd) == DWC_PHY_TYPE_PARAM_FS + ? 
DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ
+ : DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ;
+ retval = -EINVAL;
+ }
+
+ hcd->core_params->host_ls_low_power_phy_clk = val;
+ return retval;
+}
+
+int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hcd *hcd, int val)
+{
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ dev_warn(hcd->dev, "Wrong value for phy_ulpi_ddr\n");
+ dev_warn(hcd->dev, "phy_ulpi_ddr must be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ hcd->core_params->phy_ulpi_ddr = val;
+ return 0;
+}
+
+int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hcd *hcd, int val)
+{
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ dev_warn(hcd->dev, "Wrong value for phy_ulpi_ext_vbus\n");
+ dev_warn(hcd->dev, "phy_ulpi_ext_vbus must be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ hcd->core_params->phy_ulpi_ext_vbus = val;
+ return 0;
+}
+
+int dwc2_set_param_phy_utmi_width(struct dwc2_hcd *hcd, int val)
+{
+ if (DWC2_PARAM_TEST(val, 8, 8) && DWC2_PARAM_TEST(val, 16, 16)) {
+ dev_warn(hcd->dev, "Wrong value for phy_utmi_width\n");
+ dev_warn(hcd->dev, "phy_utmi_width must be 8 or 16\n");
+ return -EINVAL;
+ }
+
+ hcd->core_params->phy_utmi_width = val;
+ return 0;
+}
+
+int dwc2_set_param_ulpi_fs_ls(struct dwc2_hcd *hcd, int val)
+{
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ dev_warn(hcd->dev, "Wrong value for ulpi_fs_ls\n");
+ dev_warn(hcd->dev, "ulpi_fs_ls must be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ hcd->core_params->ulpi_fs_ls = val;
+ return 0;
+}
+
+int dwc2_set_param_ts_dline(struct dwc2_hcd *hcd, int val)
+{
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ dev_warn(hcd->dev, "Wrong value for ts_dline\n");
+ dev_warn(hcd->dev, "ts_dline must be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ hcd->core_params->ts_dline = val;
+ return 0;
+}
+
+int dwc2_set_param_i2c_enable(struct dwc2_hcd *hcd, int val)
+{
+ int retval = 0;
+
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ dev_warn(hcd->dev, "Wrong value for i2c_enable\n");
+ dev_warn(hcd->dev, "i2c_enable must be 0 or 1\n");
+ return -EINVAL;
+ }
+
+#ifndef NO_FS_PHY_HW_CHECKS
+ if (val == 1 && !(hcd->hwcfg3 & GHWCFG3_I2C)) {
+ dev_err(hcd->dev,
+ "%d invalid for i2c_enable. Check HW configuration.\n",
+ val);
+ val = 0;
+ retval = -EINVAL;
+ }
+#endif
+
+ hcd->core_params->i2c_enable = val;
+ return retval;
+}
+
+int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hcd *hcd, int val)
+{
+ int retval = 0;
+
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ dev_warn(hcd->dev, "Wrong value for en_multiple_tx_fifo\n");
+ dev_warn(hcd->dev, "en_multiple_tx_fifo must be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ if (val == 1 && !(hcd->hwcfg4 & GHWCFG4_DED_FIFO_EN)) {
+ dev_err(hcd->dev,
+ "%d invalid for parameter en_multiple_tx_fifo. Check HW configuration.\n",
+ val);
+ val = 0;
+ retval = -EINVAL;
+ }
+
+ hcd->core_params->en_multiple_tx_fifo = val;
+ return retval;
+}
+
+int dwc2_set_param_lpm_enable(struct dwc2_hcd *hcd, int val)
+{
+ int retval = 0;
+
+ if (DWC2_PARAM_TEST(val, 0, 1)) {
+ dev_warn(hcd->dev, "Wrong value for lpm_enable\n");
+ dev_warn(hcd->dev, "lpm_enable must be 0 or 1\n");
+ return -EINVAL;
+ }
+
+ if (val && !(hcd->hwcfg3 & GHWCFG3_OTG_LPM_EN)) {
+ dev_err(hcd->dev,
+ "%d invalid for parameter lpm_enable.
Check HW configuration.\n", + val); + val = 0; + retval = -EINVAL; + } + + hcd->core_params->lpm_enable = val; + return retval; +} + +int dwc2_set_param_dma_burst_size(struct dwc2_hcd *hcd, int val) +{ + if (DWC2_PARAM_TEST(val, 1, 1) && + DWC2_PARAM_TEST(val, 4, 4) && + DWC2_PARAM_TEST(val, 8, 8) && + DWC2_PARAM_TEST(val, 16, 16) && + DWC2_PARAM_TEST(val, 32, 32) && + DWC2_PARAM_TEST(val, 64, 64) && + DWC2_PARAM_TEST(val, 128, 128) && + DWC2_PARAM_TEST(val, 256, 256)) { + dev_warn(hcd->dev, + "'%d' invalid for parameter dma_burst_size\n", val); + return -EINVAL; + } + hcd->core_params->dma_burst_size = val; + return 0; +} + +int dwc2_set_param_ic_usb_cap(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (DWC2_PARAM_TEST(val, 0, 1)) { + dev_warn(hcd->dev, "'%d' invalid for parameter ic_usb_cap\n", + val); + dev_warn(hcd->dev, "ic_usb_cap must be 0 or 1\n"); + return -EINVAL; + } + + if (val && !(hcd->hwcfg2 & GHWCFG2_OTG_ENABLE_IC_USB)) { + dev_err(hcd->dev, + "%d invalid for parameter ic_usb_cap. Check HW configuration.\n", + val); + retval = -EINVAL; + val = 0; + } + hcd->core_params->ic_usb_cap = val; + return retval; +} + +int dwc2_set_param_reload_ctl(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + int valid = 1; + + if (DWC2_PARAM_TEST(val, 0, 1)) { + dev_warn(hcd->dev, "'%d' invalid for parameter reload_ctl\n", + val); + dev_warn(hcd->dev, "reload_ctl must be 0 or 1\n"); + return -EINVAL; + } + + if (val == 1 && hcd->snpsid < DWC2_CORE_REV_2_92a) + valid = 0; + if (!valid) { + dev_err(hcd->dev, + "%d invalid for parameter reload_ctl. Check HW configuration.\n", + val); + retval = -EINVAL; + val = 0; + } + hcd->core_params->reload_ctl = val; + return retval; +} + +int dwc2_set_param_ahb_single(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + int valid = 1; + + if (DWC2_PARAM_TEST(val, 0, 1)) { + dev_warn(hcd->dev, "'%d' invalid for parameter ahb_single\n", + val); + dev_warn(hcd->dev, "ahb_single must be 0 or 1\n"); + return -EINVAL; + } + + if (val > 0 && hcd->snpsid < DWC2_CORE_REV_2_94a) + valid = 0; + if (!valid) { + dev_err(hcd->dev, + "%d invalid for parameter ahb_single. Check HW configuration.\n", + val); + retval = -EINVAL; + val = 0; + } + hcd->core_params->ahb_single = val; + return retval; +} + +int dwc2_set_param_otg_ver(struct dwc2_hcd *hcd, int val) +{ + int retval = 0; + + if (DWC2_PARAM_TEST(val, 0, 1)) { + dev_warn(hcd->dev, "'%d' invalid for parameter otg_ver\n", val); + dev_warn(hcd->dev, + "otg_ver must be 0 (for OTG 1.3 support) or 1 (for OTG 2.0 support)\n"); + return -EINVAL; + } + + hcd->core_params->otg_ver = val; + return retval; +} + +u32 dwc2_get_gsnpsid(void *base) +{ + void __iomem *addr = (void __iomem *)(base + GSNPSID); + + return readl(addr); +} + +int dwc2_get_lpm_portsleepstatus(struct dwc2_hcd *hcd) +{ + u32 lpmcfg = readl(hcd->regs + GLPMCFG); + int prt_sleep_sts = !!(lpmcfg & GLPMCFG_PRT_SLEEP_STS); + + if ((hcd->lx_state == DWC2_L1) ^ prt_sleep_sts) + dev_err(hcd->dev, + "!!! lx_state = %d, lmpcfg.prt_sleep_sts = %d !!!\n", + hcd->lx_state, prt_sleep_sts); + + return prt_sleep_sts; +} + +u16 dwc2_get_otg_version(struct dwc2_hcd *hcd) +{ + return (u16)(hcd->otg_ver == 1 ? 
0x0200 : 0x0103); +} + +int dwc2_check_core_status(struct dwc2_hcd *hcd) +{ + if (readl(hcd->regs + GSNPSID) == 0xffffffff) + return -1; + else + return 0; +} diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h new file mode 100644 index 0000000..923a327 --- /dev/null +++ b/drivers/usb/dwc2/core.h @@ -0,0 +1,745 @@ +/* + * core.h - DesignWare HS OTG Controller common declarations + * + * Copyright (C) 2004-2012 Synopsys, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __DWC_CORE_H__ +#define __DWC_CORE_H__ + +#include "hw.h" + +#if 0 +static inline void do_write(u32 value, void *addr) +{ + writel(value, addr); + pr_info("INFO:: wrote %08x to %p\n", value, addr); +} + +#undef writel +#define writel(v, a) do_write(v, a) +#endif + +/* Maximum number of Endpoints/HostChannels */ +#define MAX_EPS_CHANNELS 16 + +/* Macros defined for DWC OTG HW Release version */ +#define DWC2_CORE_REV_2_60a 0x4f54260a +#define DWC2_CORE_REV_2_71a 0x4f54271a +#define DWC2_CORE_REV_2_72a 0x4f54272a +#define DWC2_CORE_REV_2_80a 0x4f54280a +#define DWC2_CORE_REV_2_81a 0x4f54281a +#define DWC2_CORE_REV_2_90a 0x4f54290a +#define DWC2_CORE_REV_2_91a 0x4f54291a +#define DWC2_CORE_REV_2_92a 0x4f54292a +#define DWC2_CORE_REV_2_93a 0x4f54293a +#define DWC2_CORE_REV_2_94a 0x4f54294a +#define DWC2_CORE_REV_3_00a 0x4f54300a + +struct dwc2_hcd; + +/* Reasons for halting a host channel */ +enum dwc2_halt_status { + DWC2_HC_XFER_NO_HALT_STATUS, + DWC2_HC_XFER_COMPLETE, + DWC2_HC_XFER_URB_COMPLETE, + DWC2_HC_XFER_ACK, + DWC2_HC_XFER_NAK, + DWC2_HC_XFER_NYET, + DWC2_HC_XFER_STALL, + DWC2_HC_XFER_XACT_ERR, + DWC2_HC_XFER_FRAME_OVERRUN, + DWC2_HC_XFER_BABBLE_ERR, + DWC2_HC_XFER_DATA_TOGGLE_ERR, + DWC2_HC_XFER_AHB_ERR, + DWC2_HC_XFER_PERIODIC_INCOMPLETE, + DWC2_HC_XFER_URB_DEQUEUE, +}; + +/* + * Host channel descriptor. This structure represents the state of a single + * host channel when acting in host mode. It contains the data items needed to + * transfer packets to an endpoint via a host channel. + */ +struct dwc2_hc { + /* Host channel number used for register address lookup */ + u8 hc_num; + + /* Device to access */ + unsigned dev_addr:7; + + /* EP to access */ + unsigned ep_num:4; + + /* EP direction. 0: OUT, 1: IN */ + unsigned ep_is_in:1; + + /* + * EP speed. + * One of the following values: + * - DWC2_EP_SPEED_LOW: 0 + * - DWC2_EP_SPEED_FULL: 1 + * - DWC2_EP_SPEED_HIGH: 2 + */ + unsigned speed:2; +#define DWC2_EP_SPEED_LOW 0 +#define DWC2_EP_SPEED_FULL 1 +#define DWC2_EP_SPEED_HIGH 2 + + /* + * Endpoint type. + * One of the following values: + * - DWC2_EP_TYPE_CONTROL: 0 + * - DWC2_EP_TYPE_ISOC: 1 + * - DWC2_EP_TYPE_BULK: 2 + * - DWC2_EP_TYPE_INTR: 3 + */ + unsigned ep_type:2; +#define DWC2_EP_TYPE_CONTROL 0 +#define DWC2_EP_TYPE_ISOC 1 +#define DWC2_EP_TYPE_BULK 2 +#define DWC2_EP_TYPE_INTR 3 + + /* Max packet size in bytes */ + unsigned max_packet:11; + + /* + * PID for initial transaction. + * 0: DATA0 + * 1: DATA2 + * 2: DATA1 + * 3: MDATA (non-Control EP), + * SETUP (Control EP) + */ + unsigned data_pid_start:2; +#define DWC2_HC_PID_DATA0 (TSIZ_SC_MC_PID_DATA0 >> TSIZ_SC_MC_PID_SHIFT) +#define DWC2_HC_PID_DATA2 (TSIZ_SC_MC_PID_DATA2 >> TSIZ_SC_MC_PID_SHIFT) +#define DWC2_HC_PID_DATA1 (TSIZ_SC_MC_PID_DATA1 >> TSIZ_SC_MC_PID_SHIFT) +#define DWC2_HC_PID_MDATA (TSIZ_SC_MC_PID_MDATA >> TSIZ_SC_MC_PID_SHIFT) +#define DWC2_HC_PID_SETUP (TSIZ_SC_MC_PID_SETUP >> TSIZ_SC_MC_PID_SHIFT) + + /* Number of periodic transactions per (micro)frame */ + unsigned multi_count:2; + + /* Pointer to the current transfer buffer position */ + u8 *xfer_buff; + /* + * In Buffer DMA mode this buffer will be used if xfer_buff is not + * DWORD aligned + */ + dma_addr_t align_buff; + /* Total number of bytes to transfer */ + u32 xfer_len; + /* Number of bytes transferred so far */ + u32 xfer_count; + /* Packet count at start of transfer */ + u16 start_pkt_count; + + /* + * Flag to indicate whether the transfer has been started. Set to 1 if + * it has been started, 0 otherwise. 
+ */
+ u8 xfer_started;
+
+ /*
+ * Set to 1 to indicate that a PING request should be issued on this
+ * channel. If 0, process normally.
+ */
+ u8 do_ping;
+
+ /*
+ * Set to 1 to indicate that the error count for this transaction is
+ * non-zero. Set to 0 if the error count is 0.
+ */
+ u8 error_state;
+
+ /*
+ * Set to 1 to indicate that this channel should be halted the next
+ * time a request is queued for the channel. This is necessary in
+ * slave mode if no request queue space is available when an attempt
+ * is made to halt the channel.
+ */
+ u8 halt_on_queue;
+
+ /*
+ * Set to 1 if the host channel has been halted, but the core is not
+ * finished flushing queued requests. Otherwise 0.
+ */
+ u8 halt_pending;
+
+ /* Reason for halting the host channel */
+ enum dwc2_halt_status halt_status;
+
+ /* Split settings for the host channel */
+ u8 do_split; /* Enable split for the channel */
+ u8 complete_split; /* Enable complete split */
+ u8 hub_addr; /* Address of high speed hub */
+ u8 port_addr; /* Port of the low/full speed device */
+
+ /*
+ * Split transaction position
+ * One of the following values:
+ * - DWC_HCSPLT_XACTPOS_MID
+ * - DWC_HCSPLT_XACTPOS_BEGIN
+ * - DWC_HCSPLT_XACTPOS_END
+ * - DWC_HCSPLT_XACTPOS_ALL
+ */
+ u8 xact_pos;
+#define DWC_HCSPLT_XACTPOS_MID (HCSPLT_XACTPOS_MID >> HCSPLT_XACTPOS_SHIFT)
+#define DWC_HCSPLT_XACTPOS_END (HCSPLT_XACTPOS_END >> HCSPLT_XACTPOS_SHIFT)
+#define DWC_HCSPLT_XACTPOS_BEGIN (HCSPLT_XACTPOS_BEGIN >> HCSPLT_XACTPOS_SHIFT)
+#define DWC_HCSPLT_XACTPOS_ALL (HCSPLT_XACTPOS_ALL >> HCSPLT_XACTPOS_SHIFT)
+
+ /*
+ * Number of requests issued for this channel since it was assigned to
+ * the current transfer (not counting PINGs)
+ */
+ u8 requests;
+
+ /* Scheduling micro-frame bitmap */
+ u8 schinfo;
+
+ /* Number of Transfer Descriptors */
+ u16 ntd;
+
+ /* Queue Head for the transfer being processed by this channel */
+ struct dwc2_qh *qh;
+
+ /* Entry in list of host channels */
+ struct list_head hc_list_entry;
+
+ /* Descriptor List DMA address */
+ dma_addr_t desc_list_addr;
+};
+
+/*
+ * The following parameters may be specified when starting the module. These
+ * parameters define how the DWC_otg controller should be configured.
+ */
+struct dwc2_core_params {
+ int opt;
+
+ /*
+ * Specifies the OTG capabilities. The driver will automatically
+ * detect the value for this parameter if none is specified.
+ * 0 - HNP and SRP capable (default)
+ * 1 - SRP Only capable
+ * 2 - No HNP/SRP capable
+ */
+ int otg_cap;
+
+ /*
+ * Specifies whether to use slave or DMA mode for accessing the data
+ * FIFOs. The driver will automatically detect the value for this
+ * parameter if none is specified.
+ * 0 - Slave
+ * 1 - DMA (default, if available)
+ */
+ int dma_enable;
+
+ /*
+ * When DMA mode is enabled specifies whether to use address DMA or
+ * DMA Descriptor mode for accessing the data FIFOs in device mode.
+ * The driver will automatically detect the value for this if none
+ * is specified.
+ * 0 - address DMA
+ * 1 - DMA Descriptor (default, if available)
+ */
+ int dma_desc_enable;
+
+ /*
+ * The DMA Burst size (applicable only for External DMA Mode).
+ * 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
+ */
+ int dma_burst_size; /* Translate this to GAHBCFG values */
+
+ /*
+ * Specifies the maximum speed of operation in host and device mode.
+ * The actual speed depends on the speed of the attached device and
+ * the value of phy_type.
+ * 0 - High Speed (default) + * 1 - Full Speed + */ + int speed; + + /* + * Specifies whether low power mode is supported when attached + * to a Full Speed or Low Speed device in host mode. + * 0 - Don't support low power mode (default) + * 1 - Support low power mode + */ + int host_support_fs_ls_low_power; + + /* + * Specifies the PHY clock rate in low power mode when connected to a + * Low Speed device in host mode. This parameter is applicable only if + * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS + * then defaults to 6 MHZ otherwise 48 MHZ. + * + * 0 - 48 MHz + * 1 - 6 MHz + */ + int host_ls_low_power_phy_clk; + + /* + * 0 - Use cC FIFO size parameters + * 1 - Allow dynamic FIFO sizing (default) + */ + int enable_dynamic_fifo; + + /* + * Number of 4-byte words in the Rx FIFO in host mode when dynamic + * FIFO sizing is enabled. + * 16 to 32768 (default 1024) + */ + int host_rx_fifo_size; + + /* + * Number of 4-byte words in the non-periodic Tx FIFO in host mode + * when Dynamic FIFO sizing is enabled in the core. + * 16 to 32768 (default 1024) + */ + int host_nperio_tx_fifo_size; + + /* + * Number of 4-byte words in the host periodic Tx FIFO when dynamic + * FIFO sizing is enabled. + * 16 to 32768 (default 1024) + */ + int host_perio_tx_fifo_size; + + /* + * The maximum transfer size supported in bytes. + * 2047 to 65,535 (default 65,535) + */ + int max_transfer_size; + + /* + * The maximum number of packets in a transfer. + * 15 to 511 (default 511) + */ + int max_packet_count; + + /* + * The number of host channel registers to use. + * 1 to 16 (default 12) + * Note: The FPGA configuration supports a maximum of 12 host channels. + */ + int host_channels; + + /* + * Specifies the type of PHY interface to use. By default, the driver + * will automatically detect the phy_type. + * + * 0 - Full Speed PHY + * 1 - UTMI+ (default) + * 2 - ULPI + */ + int phy_type; + + /* + * Specifies the UTMI+ Data Width. This parameter is + * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI + * PHY_TYPE, this parameter indicates the data width between + * the MAC and the ULPI Wrapper.) Also, this parameter is + * applicable only if the OTG_HSPHY_WIDTH cC parameter was set + * to "8 and 16 bits", meaning that the core has been + * configured to work at either data path width. + * + * 8 or 16 bits (default 16) + */ + int phy_utmi_width; + + /* + * Specifies whether the ULPI operates at double or single + * data rate. This parameter is only applicable if PHY_TYPE is + * ULPI. + * + * 0 - single data rate ULPI interface with 8 bit wide data + * bus (default) + * 1 - double data rate ULPI interface with 4 bit wide data + * bus + */ + int phy_ulpi_ddr; + + /* + * Specifies whether to use the internal or external supply to + * drive the vbus with a ULPI phy + */ + int phy_ulpi_ext_vbus; + + /* + * Specifies whether to use the I2Cinterface for full speed PHY. This + * parameter is only applicable if PHY_TYPE is FS. + * 0 - No (default) + * 1 - Yes + */ + int i2c_enable; + + int ulpi_fs_ls; + + int ts_dline; + + /* + * Specifies whether dedicated per-endpoint transmit FIFOs are enabled + * 0 - No + * 1 - Yes + */ + int en_multiple_tx_fifo; + + /* Specifies whether LPM (Link Power Management) support is enabled */ + int lpm_enable; + + /* + * IS_USB Capability + * 1 - Enabled + * 0 - Disabled + */ + int ic_usb_cap; + + /* + * HFIR Reload Control + * 0 - HFIR cannot be reloaded dynamically. + * 1 - Allow dynamic reloading of HFIR register during runtime. 
+ */ + int reload_ctl; + + /* + * GAHBCFG: AHB Single Support + * This bit when programmed supports SINGLE transfers for remainder + * data in a transfer for DMA mode of operation. + * 0 - remainder data will be sent using INCR burst size. + * 1 - remainder data will be sent using SINGLE burst size. + */ + int ahb_single; + + /* + * OTG revision supported + * 0 - OTG 1.3 revision + * 1 - OTG 2.0 revision + */ + int otg_ver; +}; + +/* Device States */ +enum dwc2_lx_state { + /* On state */ + DWC2_L0, + /* LPM sleep state*/ + DWC2_L1, + /* USB suspend state*/ + DWC2_L2, + /* Off state*/ + DWC2_L3 +}; + +/* + * The following are work functions and timers used for handling some + * interrupts + */ +extern void w_conn_id_status_change(struct work_struct *work); +extern void w_wakeup_detected(unsigned long data); + +/* + * The following functions support initialization of the core driver component + * and the DWC_otg controller + */ +extern void dwc2_core_host_init(struct dwc2_hcd *hcd); + +/* + * Host core Functions. + * The following functions support managing the DWC_otg controller in host + * mode. + */ +extern void dwc2_hc_init(struct dwc2_hcd *hcd, struct dwc2_hc *hc); +extern void dwc2_hc_halt(struct dwc2_hcd *hcd, struct dwc2_hc *hc, + enum dwc2_halt_status halt_status); +extern void dwc2_hc_cleanup(struct dwc2_hcd *hcd, struct dwc2_hc *hc); +extern void dwc2_hc_start_transfer(struct dwc2_hcd *hcd, struct dwc2_hc *hc); +extern void dwc2_hc_start_transfer_ddma(struct dwc2_hcd *hcd, + struct dwc2_hc *hc); +extern int dwc2_hc_continue_transfer(struct dwc2_hcd *hcd, struct dwc2_hc *hc); +extern void dwc2_hc_do_ping(struct dwc2_hcd *hcd, struct dwc2_hc *hc); +extern void dwc2_hc_write_packet(struct dwc2_hcd *hcd, struct dwc2_hc *hc); +extern void dwc2_enable_host_interrupts(struct dwc2_hcd *hcd); +extern void dwc2_disable_host_interrupts(struct dwc2_hcd *hcd); + +extern u32 dwc2_calc_frame_interval(struct dwc2_hcd *hcd); +extern int dwc2_check_core_status(struct dwc2_hcd *hcd); + +/* + * Common core Functions. + * The following functions support managing the DWC_otg controller in either + * device or host mode. + */ +extern void dwc2_read_packet(struct dwc2_hcd *hcd, u8 *dest, u16 bytes); +extern void dwc2_flush_tx_fifo(struct dwc2_hcd *hcd, const int num); +extern void dwc2_flush_rx_fifo(struct dwc2_hcd *hcd); + +extern void dwc2_core_init(struct dwc2_hcd *hcd); +extern void dwc2_enable_global_interrupts(struct dwc2_hcd *_hcd); +extern void dwc2_disable_global_interrupts(struct dwc2_hcd *_hcd); + +/* This function should be called on every hardware interrupt. */ +extern int dwc2_handle_common_intr(void *otg_dev); + +/* OTG Core Parameters */ + +/* + * Specifies the OTG capabilities. The driver will automatically + * detect the value for this parameter if none is specified. + * 0 - HNP and SRP capable (default) + * 1 - SRP Only capable + * 2 - No HNP/SRP capable + */ +extern int dwc2_set_param_otg_cap(struct dwc2_hcd *hcd, int val); +#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0 +#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1 +#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2 + +/* + * Specifies whether to use slave or DMA mode for accessing the data + * FIFOs. The driver will automatically detect the value for this + * parameter if none is specified. 
+ * 0 - Slave
+ * 1 - DMA (default, if available)
+ */
+extern int dwc2_set_param_dma_enable(struct dwc2_hcd *hcd, int val);
+
+/*
+ * When DMA mode is enabled specifies whether to use
+ * address DMA or DMA Descriptor mode for accessing the data
+ * FIFOs in device mode. The driver will automatically detect
+ * the value for this parameter if none is specified.
+ * 0 - address DMA
+ * 1 - DMA Descriptor (default, if available)
+ */
+extern int dwc2_set_param_dma_desc_enable(struct dwc2_hcd *hcd, int val);
+
+/*
+ * The DMA Burst size (applicable only for External DMA Mode).
+ * 1, 4, 8, 16, 32, 64, 128, 256 (default 32)
+ */
+extern int dwc2_set_param_dma_burst_size(struct dwc2_hcd *hcd, int val);
+
+/*
+ * Specifies the maximum speed of operation in host and device mode.
+ * The actual speed depends on the speed of the attached device and
+ * the value of phy_type.
+ * 0 - High Speed (default)
+ * 1 - Full Speed
+ */
+extern int dwc2_set_param_speed(struct dwc2_hcd *hcd, int val);
+#define DWC_SPEED_PARAM_HIGH 0
+#define DWC_SPEED_PARAM_FULL 1
+
+/*
+ * Specifies whether low power mode is supported when attached
+ * to a Full Speed or Low Speed device in host mode.
+ *
+ * 0 - Don't support low power mode (default)
+ * 1 - Support low power mode
+ */
+extern int dwc2_set_param_host_support_fs_ls_low_power(struct dwc2_hcd *hcd,
+ int val);
+
+/*
+ * Specifies the PHY clock rate in low power mode when connected to a
+ * Low Speed device in host mode. This parameter is applicable only if
+ * HOST_SUPPORT_FS_LS_LOW_POWER is enabled. If PHY_TYPE is set to FS
+ * then defaults to 6 MHZ otherwise 48 MHZ.
+ *
+ * 0 - 48 MHz
+ * 1 - 6 MHz
+ */
+extern int dwc2_set_param_host_ls_low_power_phy_clk(struct dwc2_hcd *hcd,
+ int val);
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_48MHZ 0
+#define DWC_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ 1
+
+/*
+ * 0 - Use cC FIFO size parameters
+ * 1 - Allow dynamic FIFO sizing (default)
+ */
+extern int dwc2_set_param_enable_dynamic_fifo(struct dwc2_hcd *hcd, int val);
+
+/*
+ * Number of 4-byte words in the Rx FIFO in host mode when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 1024)
+ */
+extern int dwc2_set_param_host_rx_fifo_size(struct dwc2_hcd *hcd, int val);
+
+/*
+ * Number of 4-byte words in the non-periodic Tx FIFO in host mode
+ * when Dynamic FIFO sizing is enabled in the core.
+ * 16 to 32768 (default 256)
+ */
+extern int dwc2_set_param_host_nperio_tx_fifo_size(struct dwc2_hcd *hcd,
+ int val);
+
+/*
+ * Number of 4-byte words in the host periodic Tx FIFO when dynamic
+ * FIFO sizing is enabled.
+ * 16 to 32768 (default 256)
+ */
+extern int dwc2_set_param_host_perio_tx_fifo_size(struct dwc2_hcd *hcd,
+ int val);
+
+/*
+ * The maximum transfer size supported in bytes.
+ * 2047 to 65,535 (default 65,535)
+ */
+extern int dwc2_set_param_max_transfer_size(struct dwc2_hcd *hcd, int val);
+
+/*
+ * The maximum number of packets in a transfer.
+ * 15 to 511 (default 511)
+ */
+extern int dwc2_set_param_max_packet_count(struct dwc2_hcd *hcd, int val);
+
+/*
+ * The number of host channel registers to use.
+ * 1 to 16 (default 11)
+ * Note: The FPGA configuration supports a maximum of 11 host channels.
+ */
+extern int dwc2_set_param_host_channels(struct dwc2_hcd *hcd, int val);
+
+/*
+ * Specifies the type of PHY interface to use. By default, the driver
+ * will automatically detect the phy_type.
+ * + * 0 - Full Speed PHY + * 1 - UTMI+ (default) + * 2 - ULPI + */ +extern int dwc2_set_param_phy_type(struct dwc2_hcd *hcd, int val); +#define DWC_PHY_TYPE_PARAM_FS 0 +#define DWC_PHY_TYPE_PARAM_UTMI 1 +#define DWC_PHY_TYPE_PARAM_ULPI 2 + +/* + * Specifies the UTMI+ Data Width. This parameter is + * applicable for a PHY_TYPE of UTMI+ or ULPI. (For a ULPI + * PHY_TYPE, this parameter indicates the data width between + * the MAC and the ULPI Wrapper.) Also, this parameter is + * applicable only if the OTG_HSPHY_WIDTH cC parameter was set + * to "8 and 16 bits", meaning that the core has been + * configured to work at either data path width. + * + * 8 or 16 bits (default 16) + */ +extern int dwc2_set_param_phy_utmi_width(struct dwc2_hcd *hcd, int val); + +/* + * Specifies whether the ULPI operates at double or single + * data rate. This parameter is only applicable if PHY_TYPE is + * ULPI. + * + * 0 - single data rate ULPI interface with 8 bit wide data + * bus (default) + * 1 - double data rate ULPI interface with 4 bit wide data + * bus + */ +extern int dwc2_set_param_phy_ulpi_ddr(struct dwc2_hcd *hcd, int val); + +/* + * Specifies whether to use the internal or external supply to + * drive the vbus with a ULPI phy. + */ +extern int dwc2_set_param_phy_ulpi_ext_vbus(struct dwc2_hcd *hcd, int val); +#define DWC_PHY_ULPI_INTERNAL_VBUS 0 +#define DWC_PHY_ULPI_EXTERNAL_VBUS 1 + +/* + * Specifies whether to use the I2Cinterface for full speed PHY. This + * parameter is only applicable if PHY_TYPE is FS. + * 0 - No (default) + * 1 - Yes + */ +extern int dwc2_set_param_i2c_enable(struct dwc2_hcd *hcd, int val); + +extern int dwc2_set_param_ulpi_fs_ls(struct dwc2_hcd *hcd, int val); + +extern int dwc2_set_param_ts_dline(struct dwc2_hcd *hcd, int val); + +/* + * Specifies whether dedicated transmit FIFOs are + * enabled for non periodic IN endpoints in device mode + * 0 - No + * 1 - Yes + */ +extern int dwc2_set_param_en_multiple_tx_fifo(struct dwc2_hcd *hcd, int val); + +/* + * Specifies whether LPM (Link Power Management) support is enabled + */ +extern int dwc2_set_param_lpm_enable(struct dwc2_hcd *hcd, int val); + +/* + * Specifies whether IC_USB capability is enabled + */ +extern int dwc2_set_param_ic_usb_cap(struct dwc2_hcd *hcd, int val); + +extern int dwc2_set_param_reload_ctl(struct dwc2_hcd *hcd, int val); + +extern int dwc2_set_param_ahb_single(struct dwc2_hcd *hcd, int val); + +extern int dwc2_set_param_otg_ver(struct dwc2_hcd *hcd, int val); + +/* + * Dump core registers and SPRAM + */ +extern void dwc2_dump_dev_registers(struct dwc2_hcd *hcd); +extern void dwc2_dump_spram(struct dwc2_hcd *hcd); +extern void dwc2_dump_host_registers(struct dwc2_hcd *hcd); +extern void dwc2_dump_global_registers(struct dwc2_hcd *hcd); + +/* + * Get Content of SNPSID register. + */ +extern u32 dwc2_get_gsnpsid(void *base); + +/* + * Get value of prt_sleep_sts field from the GLPMCFG register + */ +extern int dwc2_get_lpm_portsleepstatus(struct dwc2_hcd *hcd); + +/* + * Return OTG version - either 1.3 or 2.0 + */ +extern u16 dwc2_get_otg_version(struct dwc2_hcd *hcd); + +#endif /* __DWC_CORE_H__ */ diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c new file mode 100644 index 0000000..26bd445 --- /dev/null +++ b/drivers/usb/dwc2/core_intr.c @@ -0,0 +1,726 @@ +/* + * core_intr.c - DesignWare HS OTG Controller common interrupt handling + * + * Copyright (C) 2004-2012 Synopsys, Inc. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* + * This file contains the common interrupt handlers + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/usb.h> + +#include <linux/usb/hcd.h> +#include <linux/usb/ch11.h> +#include <linux/usb/gadget.h> +#include <linux/usb/ch9.h> + +#include "core.h" +#include "hcd.h" + +static const char *op_state_str(struct dwc2_hcd *hcd) +{ +#ifdef DEBUG + return (hcd->op_state == A_HOST ? "a_host" : + (hcd->op_state == A_SUSPEND ? "a_suspend" : + (hcd->op_state == A_PERIPHERAL ? "a_peripheral" : + (hcd->op_state == B_PERIPHERAL ? "b_peripheral" : + (hcd->op_state == B_HOST ? "b_host" : "unknown"))))); +#else + return ""; +#endif +} + +/** + * dwc2_handle_mode_mismatch_intr() - Logs a mode mismatch warning message + * + * @hcd: Programming view of DWC_otg controller + */ +int dwc2_handle_mode_mismatch_intr(struct dwc2_hcd *hcd) +{ + dev_warn(hcd->dev, "Mode Mismatch Interrupt: currently in %s mode\n", + dwc2_is_host_mode(hcd) ? "Host" : "Device"); + + /* Clear interrupt */ + writel(GINTSTS_ModeMis, hcd->regs + GINTSTS); + return 1; +} + +/** + * dwc2_handle_otg_intr() - Handles the OTG Interrupts. It reads the OTG + * Interrupt Register (GOTGINT) to determine what interrupt has occurred. 
+ * + * @hcd: Programming view of DWC_otg controller + */ +int dwc2_handle_otg_intr(struct dwc2_hcd *hcd) +{ + u32 gotgint; + u32 gotgctl; + u32 gintmsk; + + gotgint = readl(hcd->regs + GOTGINT); + gotgctl = readl(hcd->regs + GOTGCTL); + dev_dbg(hcd->dev, "++OTG Interrupt gotgint=%0x [%s]\n", gotgint, + op_state_str(hcd)); + + if (gotgint & GOTGINT_SesEndDet) { + dev_dbg(hcd->dev, + " ++OTG Interrupt: Session End Detected++ (%s)\n", + op_state_str(hcd)); + gotgctl = readl(hcd->regs + GOTGCTL); + + if (hcd->op_state == B_HOST) { + dwc2_pcd_start(hcd); + hcd->op_state = B_PERIPHERAL; + } else { + /* + * If not B_HOST and Device HNP still set, HNP did + * not succeed! + */ + if (gotgctl & GOTGCTL_DEVHNPEN) { + dev_dbg(hcd->dev, "Session End Detected\n"); + dev_err(hcd->dev, + "Device Not Connected/Responding!\n"); + } + + /* + * If Session End Detected the B-Cable has been + * disconnected + */ + /* Reset PCD and Gadget driver to a clean state */ + hcd->lx_state = DWC2_L0; + spin_unlock(&hcd->lock); + dwc2_pcd_stop(hcd); + spin_lock(&hcd->lock); + } + + gotgctl = readl(hcd->regs + GOTGCTL); + gotgctl &= ~GOTGCTL_DEVHNPEN; + writel(gotgctl, hcd->regs + GOTGCTL); + } + + if (gotgint & GOTGINT_SesReqSucStsChng) { + dev_dbg(hcd->dev, + " ++OTG Interrupt: Session Request Success Status Change++\n"); + gotgctl = readl(hcd->regs + GOTGCTL); + if (gotgctl & GOTGCTL_SESREQSCS) { + if (hcd->core_params->phy_type == + DWC_PHY_TYPE_PARAM_FS && + hcd->core_params->i2c_enable > 0) { + hcd->srp_success = 1; + } else { + spin_unlock(&hcd->lock); + dwc2_pcd_resume(hcd); + spin_lock(&hcd->lock); + + /* Clear Session Request */ + gotgctl = readl(hcd->regs + GOTGCTL); + gotgctl &= ~GOTGCTL_SESREQ; + writel(gotgctl, hcd->regs + GOTGCTL); + } + } + } + + if (gotgint & GOTGINT_HstnegSucStsChng) { + /* + * Print statements during the HNP interrupt handling + * can cause it to fail + */ + gotgctl = readl(hcd->regs + GOTGCTL); + /* + * WA for 3.00a- HW is not setting cur_mode, even sometimes + * this does not help + */ + if (hcd->snpsid >= DWC2_CORE_REV_3_00a) + udelay(100); + if (gotgctl & GOTGCTL_HSTNEGSCS) { + if (dwc2_is_host_mode(hcd)) { + hcd->op_state = B_HOST; + /* + * Need to disable SOF interrupt immediately. + * When switching from device to host, the PCD + * interrupt handler won't handle the interrupt + * if host mode is already set. The HCD + * interrupt handler won't get called if the + * HCD state is HALT. This means that the + * interrupt does not get handled and Linux + * complains loudly. + */ + gintmsk = readl(hcd->regs + GINTMSK); + gintmsk &= ~GINTSTS_SOF; + writel(gintmsk, hcd->regs + GINTMSK); + + /* + * Call callback function with spin lock + * released + */ + spin_unlock(&hcd->lock); + dwc2_pcd_stop(hcd); + + /* Initialize the Core for Host mode */ + dwc2_hcd_start(hcd); + spin_lock(&hcd->lock); + hcd->op_state = B_HOST; + } + } else { + gotgctl = readl(hcd->regs + GOTGCTL); + gotgctl &= ~(GOTGCTL_HNPREQ | GOTGCTL_DEVHNPEN); + writel(gotgctl, hcd->regs + GOTGCTL); + dev_dbg(hcd->dev, "HNP Failed\n"); + dev_err(hcd->dev, "Device Not Connected/Responding\n"); + } + } + + if (gotgint & GOTGINT_HstNegDet) { + /* + * The disconnect interrupt is set at the same time as + * Host Negotiation Detected. During the mode switch all + * interrupts are cleared so the disconnect interrupt + * handler will not get executed. + */ + dev_dbg(hcd->dev, + " ++OTG Interrupt: Host Negotiation Detected++ (%s)\n", + (dwc2_is_host_mode(hcd) ? 
"Host" : "Device")); + if (dwc2_is_device_mode(hcd)) { + dev_dbg(hcd->dev, "a_suspend->a_peripheral (%d)\n", + hcd->op_state); + spin_unlock(&hcd->lock); + dwc2_hcd_disconnect(hcd); + dwc2_pcd_start(hcd); + spin_lock(&hcd->lock); + hcd->op_state = A_PERIPHERAL; + } else { + /* + * Need to disable SOF interrupt immediately. When + * switching from device to host, the PCD interrupt + * handler won't handle the interrupt if host mode is + * already set. The HCD interrupt handler won't get + * called if the HCD state is HALT. This means that + * the interrupt does not get handled and Linux + * complains loudly. + */ + gintmsk = readl(hcd->regs + GINTMSK); + gintmsk &= ~GINTSTS_SOF; + writel(gintmsk, hcd->regs + GINTMSK); + spin_unlock(&hcd->lock); + dwc2_pcd_stop(hcd); + dwc2_hcd_start(hcd); + spin_lock(&hcd->lock); + hcd->op_state = A_HOST; + } + } + + if (gotgint & GOTGINT_ADevTOUTChg) + dev_dbg(hcd->dev, + " ++OTG Interrupt: A-Device Timeout Change++\n"); + if (gotgint & GOTGINT_DbnceDone) + dev_dbg(hcd->dev, " ++OTG Interrupt: Debounce Done++\n"); + + /* Clear GOTGINT */ + writel(gotgint, hcd->regs + GOTGINT); + + return 1; +} + +void w_conn_id_status_change(struct work_struct *work) +{ + struct dwc2_hcd *hcd = container_of(work, struct dwc2_hcd, wf_otg); + u32 count = 0; + u32 gotgctl; + + dev_dbg(hcd->dev, "%s()\n", __func__); + + gotgctl = readl(hcd->regs + GOTGCTL); + dev_dbg(hcd->dev, "gotgctl=%0x\n", gotgctl); + dev_dbg(hcd->dev, "gotgctl.b.conidsts=%d\n", + !!(gotgctl & GOTGCTL_CONID_B)); + + /* B-Device connector (Device Mode) */ + if (gotgctl & GOTGCTL_CONID_B) { + /* Wait for switch to device mode */ + dev_dbg(hcd->dev, "connId B\n"); + while (!dwc2_is_device_mode(hcd)) { + dev_info(hcd->dev, + "Waiting for Peripheral Mode, Mode=%s\n", + dwc2_is_host_mode(hcd) ? "Host" : + "Peripheral"); + msleep(100); + if (++count > 10000) + break; + } + if (count > 10000) + dev_err(hcd->dev, + "Connection id status change timed out"); + hcd->op_state = B_PERIPHERAL; + dwc2_core_init(hcd); + dwc2_enable_global_interrupts(hcd); + dwc2_pcd_start(hcd); + } else { + /* A-Device connector (Host Mode) */ + dev_dbg(hcd->dev, "connId A\n"); + while (!dwc2_is_host_mode(hcd)) { + dev_info(hcd->dev, "Waiting for Host Mode, Mode=%s\n", + dwc2_is_host_mode(hcd) ? "Host" : + "Peripheral"); + msleep(100); + if (++count > 10000) + break; + } + if (count > 10000) + dev_err(hcd->dev, + "Connection id status change timed out"); + hcd->op_state = A_HOST; + + /* Initialize the Core for Host mode */ + dwc2_core_init(hcd); + dwc2_enable_global_interrupts(hcd); + dwc2_hcd_start(hcd); + } +} + +/** + * dwc2_handle_conn_id_status_change_intr() - Handles the Connector ID Status + * Change Interrupt + * + * @hcd: Programming view of DWC_otg controller + * + * Reads the OTG Interrupt Register (GOTCTL) to determine whether this is a + * Device to Host Mode transition or a Host to Device Mode transition. This only + * occurs when the cable is connected/removed from the PHY connector. + */ +int dwc2_handle_conn_id_status_change_intr(struct dwc2_hcd *hcd) +{ + /* + * Need to disable SOF interrupt immediately. If switching from device + * to host, the PCD interrupt handler won't handle the interrupt if + * host mode is already set. The HCD interrupt handler won't get + * called if the HCD state is HALT. This means that the interrupt does + * not get handled and Linux complains loudly. 
+ */ + u32 gintmsk = readl(hcd->regs + GINTMSK); + + gintmsk &= ~GINTSTS_SOF; + writel(gintmsk, hcd->regs + GINTMSK); + + dev_dbg(hcd->dev, " ++Connector ID Status Change Interrupt++ (%s)\n", + dwc2_is_host_mode(hcd) ? "Host" : "Device"); + + /* + * Need to schedule a work, as there are possible DELAY function calls. + * Release lock before scheduling workq as it holds spinlock during + * scheduling. + */ + spin_unlock(&hcd->lock); + queue_work(hcd->wq_otg, &hcd->wf_otg); + spin_lock(&hcd->lock); + + /* Clear interrupt */ + writel(GINTSTS_ConIDStsChng, hcd->regs + GINTSTS); + + return 1; +} + +/** + * dwc2_handle_session_req_intr() - This interrupt indicates that a device is + * initiating the Session Request Protocol to request the host to turn on bus + * power so a new session can begin + * + * @hcd: Programming view of DWC_otg controller + * + * This handler responds by turning on bus power. If the DWC_otg controller is + * in low power mode, this handler brings the controller out of low power mode + * before turning on bus power. + */ +int dwc2_handle_session_req_intr(struct dwc2_hcd *hcd) +{ + dev_dbg(hcd->dev, "++Session Request Interrupt++\n"); + +#ifndef DWC_HOST_ONLY + if (dwc2_is_device_mode(hcd)) { + dev_info(hcd->dev, "SRP: Device mode\n"); + } else { + u32 hprt0; + + dev_info(hcd->dev, "SRP: Host mode\n"); + + /* Turn on the port power bit */ + hprt0 = dwc2_read_hprt0(hcd); + hprt0 |= HPRT0_PWR; + writel(hprt0, hcd->regs + HPRT0); + + /* + * Start the Connection timer. So a message can be displayed if + * connect does not occur within 10 seconds. + */ + dwc2_hcd_session_start(hcd); + } +#endif + + /* Clear interrupt */ + writel(GINTSTS_SessReqInt, hcd->regs + GINTSTS); + + return 1; +} + +void w_wakeup_detected(unsigned long data) +{ + struct dwc2_hcd *hcd = (struct dwc2_hcd *)data; + u32 hprt0; + + dev_dbg(hcd->dev, "%s()\n", __func__); + + /* + * Clear the Resume after 70ms. (Need 20 ms minimum. Use 70 ms + * so that OPT tests pass with all PHYs.) + */ + hprt0 = dwc2_read_hprt0(hcd); + dev_dbg(hcd->dev, "Resume: HPRT0=%0x\n", hprt0); + hprt0 &= ~HPRT0_RES; + writel(hprt0, hcd->regs + HPRT0); + dev_dbg(hcd->dev, "Clear Resume: HPRT0=%0x\n", + readl(hcd->regs + HPRT0)); + + dwc2_hcd_rem_wakeup(hcd); + + /* Change to L0 state */ + hcd->lx_state = DWC2_L0; +} + +/* + * This interrupt indicates that the DWC_otg controller has detected a + * resume or remote wakeup sequence. If the DWC_otg controller is in + * low power mode, the handler must brings the controller out of low + * power mode. The controller automatically begins resume signaling. + * The handler schedules a time to stop resume signaling. 
+ */ +int dwc2_handle_wakeup_detected_intr(struct dwc2_hcd *hcd) +{ + dev_dbg(hcd->dev, "++Resume and Remote Wakeup Detected Interrupt++\n"); + dev_info(hcd->dev, "%s lxstate = %d\n", __func__, hcd->lx_state); + + if (dwc2_is_device_mode(hcd)) { + dev_dbg(hcd->dev, "DSTS=0x%0x\n", readl(hcd->regs + DSTS)); + if (hcd->lx_state == DWC2_L2) { + u32 dctl; + + /* Clear Remote Wakeup Signaling */ + dctl = readl(hcd->regs + DCTL); + dctl &= ~DCTL_RmtWkUpSig; + writel(dctl, hcd->regs + DCTL); + + spin_unlock(&hcd->lock); + dwc2_pcd_resume(hcd); + spin_lock(&hcd->lock); + } else { + u32 lpmcfg = readl(hcd->regs + GLPMCFG); + + lpmcfg &= ~((1 << 4) << GLPMCFG_HIRD_THRES_SHIFT); + writel(lpmcfg, hcd->regs + GLPMCFG); + } + /* Change to L0 state */ + hcd->lx_state = DWC2_L0; + } else { + if (hcd->lx_state != DWC2_L1) { + u32 pcgcctl = readl(hcd->regs + PCGCTL); + + /* Restart the Phy Clock */ + pcgcctl &= ~PCGCTL_STOPPCLK; + writel(pcgcctl, hcd->regs + PCGCTL); + mod_timer(&hcd->wkp_timer, + jiffies + msecs_to_jiffies(71)); + } else { + /* Change to L0 state */ + hcd->lx_state = DWC2_L0; + } + } + + /* Clear interrupt */ + writel(GINTSTS_WkUpInt, hcd->regs + GINTSTS); + + return 1; +} + +/* + * This interrupt indicates that a device has been disconnected from the + * root port + */ +int dwc2_handle_disconnect_intr(struct dwc2_hcd *hcd) +{ + dev_dbg(hcd->dev, "++Disconnect Detected Interrupt++ (%s) %s\n", + dwc2_is_host_mode(hcd) ? "Host" : "Device", op_state_str(hcd)); + + /* Todo: Consolidate this if statement */ +#ifndef DWC_HOST_ONLY + if (hcd->op_state == B_HOST) { + /* + * If in device mode Disconnect and stop the HCD, then start + * the PCD + */ + spin_unlock(&hcd->lock); + dwc2_hcd_disconnect(hcd); + dwc2_pcd_start(hcd); + spin_lock(&hcd->lock); + hcd->op_state = B_PERIPHERAL; + } else if (dwc2_is_device_mode(hcd)) { + u32 gotgctl = 0; + + gotgctl = readl(hcd->regs + GOTGCTL); + if (gotgctl & GOTGCTL_HSTSETHNPEN) { + /* + * Do nothing, if HNP in process the OTG "Host + * Negotiation Detected" interrupt will do the mode + * switch + */ + } else if (!(gotgctl & GOTGCTL_DEVHNPEN)) { + /* + * If in device mode Disconnect and stop the HCD, then + * start the PCD + */ + spin_unlock(&hcd->lock); + dwc2_hcd_disconnect(hcd); + dwc2_pcd_start(hcd); + spin_lock(&hcd->lock); + hcd->op_state = B_PERIPHERAL; + } else { + dev_dbg(hcd->dev, "!a_peripheral && !devhnpen\n"); + } + } else { + if (hcd->op_state == A_HOST) + /* A-Cable still connected but device disconnected */ + dwc2_hcd_disconnect(hcd); + } +#endif + /* Change to L3 (OFF) state */ + hcd->lx_state = DWC2_L3; + + writel(GINTSTS_DisconnInt, hcd->regs + GINTSTS); + return 1; +} + +/* + * This interrupt indicates that SUSPEND state has been detected on the USB. + * + * For HNP the USB Suspend interrupt signals the change from "a_peripheral" + * to "a_host". + * + * When power management is enabled the core will be put in low power mode. + */ +int dwc2_handle_usb_suspend_intr(struct dwc2_hcd *hcd) +{ + u32 dsts; + + dev_dbg(hcd->dev, "USB SUSPEND\n"); + + if (dwc2_is_device_mode(hcd)) { + /* + * Check the Device status register to determine if the Suspend + * state is active + */ + dsts = readl(hcd->regs + DSTS); + dev_dbg(hcd->dev, "DSTS=0x%0x\n", dsts); + dev_dbg(hcd->dev, + "DSTS.Suspend Status=%d HWCFG4.Power Optimize=%d\n", + !!(dsts & DSTS_SuspSts), + !!(hcd->hwcfg4 & GHWCFG4_POWER_OPTIMIZ)); + + /* + * PCD callback for suspend. Release the lock inside of callback + * function. 
+ */ + dwc2_pcd_suspend(hcd); + } else { + if (hcd->op_state == A_PERIPHERAL) { + dev_dbg(hcd->dev, "a_peripheral->a_host\n"); + + /* Clear the a_peripheral flag, back to a_host */ + spin_unlock(&hcd->lock); + dwc2_pcd_stop(hcd); + dwc2_hcd_start(hcd); + spin_lock(&hcd->lock); + hcd->op_state = A_HOST; + } + } + + /* Change to L2 (suspend) state */ + hcd->lx_state = DWC2_L2; + + /* Clear interrupt */ + writel(GINTSTS_USBSusp, hcd->regs + GINTSTS); + + return 1; +} + +/* + * This function handles LPM transaction received interrupt + */ +static int handle_lpm_intr(struct dwc2_hcd *hcd) +{ + u32 lpmcfg; + + if (hcd->core_params->lpm_enable <= 0) + dev_info(hcd->dev, "Unexpected LPM interrupt\n"); + + lpmcfg = readl(hcd->regs + GLPMCFG); + dev_info(hcd->dev, "LPM config register = 0x%08x\n", lpmcfg); + + if (dwc2_is_host_mode(hcd)) { + dwc2_hcd_sleep(hcd); + } else { + lpmcfg |= (1 << 4) << GLPMCFG_HIRD_THRES_SHIFT; + writel(lpmcfg, hcd->regs + GLPMCFG); + } + + /* Examine prt_sleep_sts after TL1TokenTetry period max (10 us) */ + udelay(10); + lpmcfg = readl(hcd->regs + GLPMCFG); + if (lpmcfg & GLPMCFG_PRT_SLEEP_STS) { + /* Save the current state */ + hcd->lx_state = DWC2_L1; + } + + /* Clear interrupt */ + writel(GINTSTS_LPMTranRcvd, hcd->regs + GINTSTS); + return 1; +} + +#define GINTMSK_COMMON (GINTSTS_WkUpInt | GINTSTS_SessReqInt | \ + GINTSTS_ConIDStsChng | GINTSTS_OTGInt | \ + GINTSTS_ModeMis | GINTSTS_DisconnInt | \ + GINTSTS_USBSusp | GINTSTS_RestoreDone | \ + GINTSTS_PrtInt | GINTSTS_LPMTranRcvd) + +/* + * This function returns the Core Interrupt register + */ +static u32 read_common_intr(struct dwc2_hcd *hcd) +{ + u32 gintsts; + u32 gintmsk; + u32 gahbcfg; + u32 gintmsk_common = GINTMSK_COMMON; + + gintsts = readl(hcd->regs + GINTSTS); + gintmsk = readl(hcd->regs + GINTMSK); + gahbcfg = readl(hcd->regs + GAHBCFG); + +#ifdef DEBUG + /* If any common interrupts set */ + if (gintsts & gintmsk_common) + dev_dbg(hcd->dev, "gintsts=%08x gintmsk=%08x\n", + gintsts, gintmsk); +#endif + + if (gahbcfg & GAHBCFG_GlblIntrEn) + return gintsts & gintmsk & gintmsk_common; + else + return 0; +} + +/* + * Common interrupt handler + * + * The common interrupts are those that occur in both Host and Device mode. 
+ * This handler handles the following interrupts: + * - Mode Mismatch Interrupt + * - Disconnect Interrupt + * - OTG Interrupt + * - Connector ID Status Change Interrupt + * - Session Request Interrupt + * - Resume / Remote Wakeup Detected Interrupt + * - LPM Transaction Received Interrupt + * - ADP Transaction Received Interrupt + */ +int dwc2_handle_common_intr(void *dev) +{ + struct dwc2_device *otg_dev = dev; + struct dwc2_hcd *hcd = otg_dev->hcd; + u32 gintsts; + int retval = 0; + + if (dwc2_check_core_status(hcd) < 0) { + dev_warn(hcd->dev, "Controller is disconnected"); + return retval; + } + + spin_lock(&hcd->lock); + + gintsts = read_common_intr(hcd); + + if (gintsts & GINTSTS_ModeMis) + retval |= dwc2_handle_mode_mismatch_intr(hcd); + if (gintsts & GINTSTS_OTGInt) + retval |= dwc2_handle_otg_intr(hcd); + if (gintsts & GINTSTS_ConIDStsChng) + retval |= dwc2_handle_conn_id_status_change_intr(hcd); + if (gintsts & GINTSTS_DisconnInt) + retval |= dwc2_handle_disconnect_intr(hcd); + if (gintsts & GINTSTS_SessReqInt) + retval |= dwc2_handle_session_req_intr(hcd); + if (gintsts & GINTSTS_WkUpInt) + retval |= dwc2_handle_wakeup_detected_intr(hcd); + if (gintsts & GINTSTS_USBSusp) + retval |= dwc2_handle_usb_suspend_intr(hcd); + if (gintsts & GINTSTS_LPMTranRcvd) + retval |= handle_lpm_intr(hcd); + + if (gintsts & GINTSTS_RestoreDone) { + gintsts = GINTSTS_RestoreDone; + writel(gintsts, hcd->regs + GINTSTS); + dev_info(hcd->dev, " --Restore done interrupt received--\n"); + retval |= 1; + } + + if ((gintsts & GINTSTS_PrtInt) && dwc2_is_device_mode(hcd)) { + /* + * The port interrupt occurs while in device mode with + * HPRT0 Port Enable/Disable + */ + gintsts = GINTSTS_PrtInt; + writel(gintsts, hcd->regs + GINTSTS); + retval |= 1; + } + + spin_unlock(&hcd->lock); + + return retval; +} diff --git a/drivers/usb/dwc2/hw.h b/drivers/usb/dwc2/hw.h new file mode 100644 index 0000000..328d56f --- /dev/null +++ b/drivers/usb/dwc2/hw.h @@ -0,0 +1,512 @@ +/* + * hw.h - DesignWare HS OTG Controller hardware definitions + * + * Copyright 2004-2012 Synopsys, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The names of the above-listed copyright holders may not be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * ALTERNATIVELY, this software may be distributed under the terms of the + * GNU General Public License ("GPL") as published by the Free Software + * Foundation; either version 2 of the License, or (at your option) any + * later version. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS + * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, + * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DWC_HW_H__ +#define __DWC_HW_H__ + +#include "s3c-hsotg.h" + +#define GOTGCTL_CHIRPEN (1 << 27) +#define GOTGCTL_MULTVALIDBC_MASK (0x1f << 22) +#define GOTGCTL_MULTVALIDBC_SHIFT 22 +#define GOTGCTL_OTGVER (1 << 20) +#define GOTGCTL_OTGVER_SHIFT 20 +#define GOTGCTL_HSTSETHNPEN (1 << 10) + +#define GAHBCFG_AHBSingle (1 << 23) +#define GAHBCFG_NotiAllDMAWrit (1 << 22) +#define GAHBCFG_RemMemSupp (1 << 21) +#define GAHBCFG_DMAEn_SHIFT 5 + +#define GUSBCFG_ForceDevMode (1 << 30) +#define GUSBCFG_ForceHostMode (1 << 29) +#define GUSBCFG_TxEndDelay (1 << 28) +#define GUSBCFG_ICTrafficPullRemove (1 << 27) +#define GUSBCFG_ICUSBCap (1 << 26) +#define GUSBCFG_ULPIIntProtDis (1 << 25) +#define GUSBCFG_IndicatorPassThrough (1 << 24) +#define GUSBCFG_IndicatorComplement (1 << 23) +#define GUSBCFG_TermSelDlPulse (1 << 22) +#define GUSBCFG_ULPIIntVBusInd (1 << 21) +#define GUSBCFG_ULPIExtVBusDrv (1 << 20) +#define GUSBCFG_ULPIClkSuspM (1 << 19) +#define GUSBCFG_ULPIAutoRes (1 << 18) +#define GUSBCFG_ULPIFSLS (1 << 17) +#define GUSBCFG_OTGUTMIFSSel (1 << 16) +#define GUSBCFG_USBTrdTim_MASK (0xf << 10) +#define GUSBCFG_USBTrdTim_SHIFT 10 +#define GUSBCFG_HNPCap_SHIFT 9 +#define GUSBCFG_SRPCap_SHIFT 8 +#define GUSBCFG_DDRSel (1 << 7) +#define GUSBCFG_PHYSel (1 << 6) +#define GUSBCFG_FSIntf (1 << 5) +#define GUSBCFG_ULPIUTMISel (1 << 4) +#define GUSBCFG_PHYIf16 (1 << 3) + +#define GINTSTS_LPMTranRcvd (1 << 27) +#define GINTSTS_ResetDet (1 << 23) +#define GINTSTS_IncomplIP (1 << 21) +#define GINTSTS_RestoreDone (1 << 16) +#define GINTSTS_I2CInt (1 << 9) +#define GINTSTS_ULPICkInt (1 << 8) +#define GINTSTS_CurMode_Host (1 << 0) + +#define GRXSTS_PktSts_HChIn (2 << 17) +#define GRXSTS_PktSts_HChIn_XFER_COMP (3 << 17) +#define GRXSTS_PktSts_DataToggleErr (5 << 17) +#define GRXSTS_PktSts_HChHalted (7 << 17) +#define GRXSTS_HChNum_MASK (0xf << 0) +#define GRXSTS_HChNum_SHIFT 0 + +#define FIFOSIZE_DEPTH_MASK (0xffff << 16) +#define FIFOSIZE_DEPTH_SHIFT 16 +#define FIFOSIZE_STARTADDR_MASK (0xffff << 0) +#define FIFOSIZE_STARTADDR_SHIFT 0 + +#define GI2CCTL HSOTG_REG(0x0030) +#define GI2CCTL_BSYDNE (1 << 31) +#define GI2CCTL_RW (1 << 30) +#define GI2CCTL_I2CDATSE0 (1 << 28) +#define GI2CCTL_I2CDEVADDR_MASK (0x3 << 26) +#define GI2CCTL_I2CDEVADDR_SHIFT 26 +#define GI2CCTL_I2CSUSPCTL (1 << 25) +#define GI2CCTL_ACK (1 << 24) +#define GI2CCTL_I2CEN (1 << 23) +#define GI2CCTL_ADDR_MASK (0x7f << 16) +#define GI2CCTL_ADDR_SHIFT 16 +#define GI2CCTL_REGADDR_MASK (0xff << 8) +#define GI2CCTL_REGADDR_SHIFT 8 +#define GI2CCTL_RWDATA_MASK (0xff << 0) +#define GI2CCTL_RWDATA_SHIFT 0 + +#define GPVNDCTL HSOTG_REG(0x0034) +#define GGPIO HSOTG_REG(0x0038) +#define GUID HSOTG_REG(0x003c) +#define GSNPSID HSOTG_REG(0x0040) +#define GHWCFG1 HSOTG_REG(0x0044) + +#define GHWCFG2 HSOTG_REG(0x0048) +#define GHWCFG2_OTG_ENABLE_IC_USB (1 << 31) +#define GHWCFG2_DEV_TOKEN_Q_DEPTH_MASK (0x1f << 26) +#define GHWCFG2_DEV_TOKEN_Q_DEPTH_SHIFT 26 +#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_MASK (0x3 << 
24) +#define GHWCFG2_HOST_PERIO_TX_Q_DEPTH_SHIFT 24 +#define GHWCFG2_NONPERIO_TX_Q_DEPTH_MASK (0x3 << 22) +#define GHWCFG2_NONPERIO_TX_Q_DEPTH_SHIFT 22 +#define GHWCFG2_MULTI_PROC_INT (1 << 20) +#define GHWCFG2_DYNAMIC_FIFO (1 << 19) +#define GHWCFG2_PERIO_EP_SUPPORTED (1 << 18) +#define GHWCFG2_NUM_HOST_CHAN_MASK (0xf << 14) +#define GHWCFG2_NUM_HOST_CHAN_SHIFT 14 +#define GHWCFG2_NUM_DEV_EP_MASK (0xf << 10) +#define GHWCFG2_NUM_DEV_EP_SHIFT 10 +#define GHWCFG2_FS_PHY_TYPE_MASK (0x3 << 8) +#define GHWCFG2_FS_PHY_TYPE_SHIFT 8 +#define GHWCFG2_FS_PHY_TYPE_NOT_SUPPORTED (0 << 8) +#define GHWCFG2_FS_PHY_TYPE_DEDICATED (1 << 8) +#define GHWCFG2_FS_PHY_TYPE_SHARED_UTMI (2 << 8) +#define GHWCFG2_FS_PHY_TYPE_SHARED_ULPI (3 << 8) +#define GHWCFG2_HS_PHY_TYPE_MASK (0x3 << 6) +#define GHWCFG2_HS_PHY_TYPE_SHIFT 6 +#define GHWCFG2_HS_PHY_TYPE_NOT_SUPPORTED (0 << 6) +#define GHWCFG2_HS_PHY_TYPE_UTMI (1 << 6) +#define GHWCFG2_HS_PHY_TYPE_ULPI (2 << 6) +#define GHWCFG2_HS_PHY_TYPE_UTMI_ULPI (3 << 6) +#define GHWCFG2_POINT2POINT (1 << 5) +#define GHWCFG2_ARCHITECTURE_MASK (0x3 << 3) +#define GHWCFG2_ARCHITECTURE_SHIFT 3 +#define GHWCFG2_SLAVE_ONLY_ARCH (0 << 3) +#define GHWCFG2_EXT_DMA_ARCH (1 << 3) +#define GHWCFG2_INT_DMA_ARCH (2 << 3) +#define GHWCFG2_OP_MODE_MASK (0x7 << 0) +#define GHWCFG2_OP_MODE_SHIFT 0 +#define GHWCFG2_OP_MODE_HNP_SRP_CAPABLE (0 << 0) +#define GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE (1 << 0) +#define GHWCFG2_OP_MODE_NO_HNP_SRP_CAPABLE (2 << 0) +#define GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE (3 << 0) +#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_DEVICE (4 << 0) +#define GHWCFG2_OP_MODE_SRP_CAPABLE_HOST (5 << 0) +#define GHWCFG2_OP_MODE_NO_SRP_CAPABLE_HOST (6 << 0) + +#define GHWCFG3 HSOTG_REG(0x004c) +#define GHWCFG3_DFIFO_DEPTH_MASK (0xffff << 16) +#define GHWCFG3_DFIFO_DEPTH_SHIFT 16 +#define GHWCFG3_OTG_LPM_EN (1 << 15) +#define GHWCFG3_BC_SUPPORT (1 << 14) +#define GHWCFG3_OTG_ENABLE_HSIC (1 << 13) +#define GHWCFG3_ADP_SUPP (1 << 12) +#define GHWCFG3_SYNCH_RESET_TYPE (1 << 11) +#define GHWCFG3_OPTIONAL_FEATURES (1 << 10) +#define GHWCFG3_VENDOR_CTRL_IF (1 << 9) +#define GHWCFG3_I2C (1 << 8) +#define GHWCFG3_OTG_FUNC (1 << 7) +#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_MASK (0x7 << 4) +#define GHWCFG3_PACKET_SIZE_CNTR_WIDTH_SHIFT 4 +#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_MASK (0xf << 0) +#define GHWCFG3_XFER_SIZE_CNTR_WIDTH_SHIFT 0 + +#define GHWCFG4 HSOTG_REG(0x0050) +#define GHWCFG4_DESC_DMA_DYN (1 << 31) +#define GHWCFG4_DESC_DMA (1 << 30) +#define GHWCFG4_NUM_IN_EPS_MASK (0xf << 26) +#define GHWCFG4_NUM_IN_EPS_SHIFT 26 +#define GHWCFG4_DED_FIFO_EN (1 << 25) +#define GHWCFG4_SESSION_END_FILT_EN (1 << 24) +#define GHWCFG4_B_VALID_FILT_EN (1 << 23) +#define GHWCFG4_A_VALID_FILT_EN (1 << 22) +#define GHWCFG4_VBUS_VALID_FILT_EN (1 << 21) +#define GHWCFG4_IDDIG_FILT_EN (1 << 20) +#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_MASK (0xf << 16) +#define GHWCFG4_NUM_DEV_MODE_CTRL_EP_SHIFT 16 +#define GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK (0x3 << 14) +#define GHWCFG4_UTMI_PHY_DATA_WIDTH_SHIFT 14 +#define GHWCFG4_XHIBER (1 << 7) +#define GHWCFG4_HIBER (1 << 6) +#define GHWCFG4_MIN_AHB_FREQ (1 << 5) +#define GHWCFG4_POWER_OPTIMIZ (1 << 4) +#define GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK (0xf << 0) +#define GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT 0 + +#define GLPMCFG HSOTG_REG(0x0054) +#define GLPMCFG_INV_SEL_HSIC (1 << 31) +#define GLPMCFG_INV_SEL_HSIC_SHIFT 31 +#define GLPMCFG_HSIC_CONNECT (1 << 30) +#define GLPMCFG_HSIC_CONNECT_SHIFT 30 +#define GLPMCFG_RETRY_COUNT_STS_MASK (0x7 << 25) +#define GLPMCFG_RETRY_COUNT_STS_SHIFT 25 
+#define GLPMCFG_SEND_LPM (1 << 24) +#define GLPMCFG_RETRY_COUNT_MASK (0x7 << 21) +#define GLPMCFG_RETRY_COUNT_SHIFT 21 +#define GLPMCFG_LPM_CHAN_INDEX_MASK (0xf << 17) +#define GLPMCFG_LPM_CHAN_INDEX_SHIFT 17 +#define GLPMCFG_SLEEP_STATE_RESUMEOK (1 << 16) +#define GLPMCFG_PRT_SLEEP_STS (1 << 15) +#define GLPMCFG_LPM_RESP_MASK (0x3 << 13) +#define GLPMCFG_LPM_RESP_SHIFT 13 +#define GLPMCFG_HIRD_THRES_MASK (0x1f << 8) +#define GLPMCFG_HIRD_THRES_SHIFT 8 +#define GLPMCFG_HIRD_THRES_EN (0x10 << 8) +#define GLPMCFG_EN_UTMI_SLEEP (1 << 7) +#define GLPMCFG_REM_WKUP_EN (1 << 6) +#define GLPMCFG_HIRD_MASK (0xf << 2) +#define GLPMCFG_HIRD_SHIFT 2 +#define GLPMCFG_APPL_RESP (1 << 1) +#define GLPMCFG_APPL_RESP_SHIFT 1 +#define GLPMCFG_LPM_CAP_EN (1 << 0) + +#define GPWRDN HSOTG_REG(0x0058) +#define GPWRDN_MULT_VAL_ID_BC_MASK (0x1f << 24) +#define GPWRDN_MULT_VAL_ID_BC_SHIFT 24 +#define GPWRDN_ADP_INT (1 << 23) +#define GPWRDN_BSESSVLD (1 << 22) +#define GPWRDN_IDSTS (1 << 21) +#define GPWRDN_LINESTATE_MASK (0x3 << 19) +#define GPWRDN_LINESTATE_SHIFT 19 +#define GPWRDN_STS_CHGINT_MSK (1 << 18) +#define GPWRDN_STS_CHGINT (1 << 17) +#define GPWRDN_SRP_DET_MSK (1 << 16) +#define GPWRDN_SRP_DET (1 << 15) +#define GPWRDN_CONNECT_DET_MSK (1 << 14) +#define GPWRDN_CONNECT_DET (1 << 13) +#define GPWRDN_DISCONN_DET_MSK (1 << 12) +#define GPWRDN_DISCONN_DET (1 << 11) +#define GPWRDN_RST_DET_MSK (1 << 10) +#define GPWRDN_RST_DET (1 << 9) +#define GPWRDN_LNSTSCHG_MSK (1 << 8) +#define GPWRDN_LNSTSCHG (1 << 7) +#define GPWRDN_DIS_VBUS (1 << 6) +#define GPWRDN_PWRDNSWTCH (1 << 5) +#define GPWRDN_PWRDNRSTN (1 << 4) +#define GPWRDN_PWRDNCLMP (1 << 3) +#define GPWRDN_RESTORE (1 << 2) +#define GPWRDN_PMUACTV (1 << 1) +#define GPWRDN_PMUINTSEL (1 << 0) + +#define GDFIFOCFG HSOTG_REG(0x005c) +#define GDFIFOCFG_EPINFOBASE_MASK (0xffff << 16) +#define GDFIFOCFG_EPINFOBASE_SHIFT 16 +#define GDFIFOCFG_GDFIFOCFG_MASK (0xffff << 0) +#define GDFIFOCFG_GDFIFOCFG_SHIFT 0 + +#define ADPCTL HSOTG_REG(0x0060) +#define ADPCTL_AR_MASK (0x3 << 27) +#define ADPCTL_AR_SHIFT 27 +#define ADPCTL_ADP_TMOUT_INT_MSK (1 << 26) +#define ADPCTL_ADP_SNS_INT_MSK (1 << 25) +#define ADPCTL_ADP_PRB_INT_MSK (1 << 24) +#define ADPCTL_ADP_TMOUT_INT (1 << 23) +#define ADPCTL_ADP_SNS_INT (1 << 22) +#define ADPCTL_ADP_PRB_INT (1 << 21) +#define ADPCTL_ADPENA (1 << 20) +#define ADPCTL_ADPRES (1 << 19) +#define ADPCTL_ENASNS (1 << 18) +#define ADPCTL_ENAPRB (1 << 17) +#define ADPCTL_RTIM_MASK (0x7ff << 6) +#define ADPCTL_RTIM_SHIFT 6 +#define ADPCTL_PRB_PER_MASK (0x3 << 4) +#define ADPCTL_PRB_PER_SHIFT 4 +#define ADPCTL_PRB_DELTA_MASK (0x3 << 2) +#define ADPCTL_PRB_DELTA_SHIFT 2 +#define ADPCTL_PRB_DSCHRG_MASK (0x3 << 0) +#define ADPCTL_PRB_DSCHRG_SHIFT 0 + +/* Host Mode Registers */ + +#define HCFG HSOTG_REG(0x0400) + +#define HCFG_MODECHTIMEN (1 << 31) +#define HCFG_MODECHTIMEN_SHIFT 31 +#define HCFG_PERSCHEDENA (1 << 26) +#define HCFG_FRLISTEN_MASK (0x3 << 24) +#define HCFG_FRLISTEN_SHIFT 24 +#define HCFG_DESCDMA (1 << 23) +#define HCFG_RESVALID_MASK (0xff << 8) +#define HCFG_RESVALID_SHIFT 8 +#define HCFG_ENA32KHZ (1 << 7) +#define HCFG_FSLSSUPP (1 << 2) +#define HCFG_FSLSPCLKSEL_MASK (0x3 << 0) +#define HCFG_FSLSPCLKSEL_SHIFT 0 +#define HCFG_FSLSPCLKSEL_30_60_MHZ (0 << 0) +#define HCFG_FSLSPCLKSEL_48_MHZ (1 << 0) +#define HCFG_FSLSPCLKSEL_6_MHZ (2 << 0) + +#define HFIR HSOTG_REG(0x0404) + +#define HFIR_FRINT_MASK (0xffff << 0) +#define HFIR_FRINT_SHIFT 0 +#define HFIR_RLDCTRL (1 << 16) +#define HFIR_RLDCTRL_SHIFT 16 + +#define HFNUM HSOTG_REG(0x0408) + 
+#define HFNUM_FRREM_MASK (0xffff << 16) +#define HFNUM_FRREM_SHIFT 16 +#define HFNUM_FRNUM_MASK (0xffff << 0) +#define HFNUM_FRNUM_SHIFT 0 + +#define HFNUM_MAX_FRNUM 0x3fff + +#define HPTXSTS HSOTG_REG(0x0410) + +#define TXSTS_QTOP_ODD (1 << 31) +#define TXSTS_QTOP_CHNEP_MASK (0xf << 27) +#define TXSTS_QTOP_CHNEP_SHIFT 27 +#define TXSTS_QTOP_TOKEN_MASK (0x3 << 25) +#define TXSTS_QTOP_TOKEN_SHIFT 25 +#define TXSTS_QTOP_TERMINATE (1 << 24) +#define TXSTS_QSPCAVAIL_MASK (0xff << 16) +#define TXSTS_QSPCAVAIL_SHIFT 16 +#define TXSTS_FSPCAVAIL_MASK (0xffff << 0) +#define TXSTS_FSPCAVAIL_SHIFT 0 + +#define HAINT HSOTG_REG(0x0414) +#define HAINTMSK HSOTG_REG(0x0418) +#define HFLBADDR HSOTG_REG(0x041c) + +#define HPRT0 HSOTG_REG(0x0440) + +#define HPRT0_SPD_MASK (0x3 << 17) +#define HPRT0_SPD_SHIFT 17 +#define HPRT0_SPD_HIGH_SPEED (0 << 17) +#define HPRT0_SPD_FULL_SPEED (1 << 17) +#define HPRT0_SPD_LOW_SPEED (2 << 17) +#define HPRT0_TSTCTL_MASK (0xf << 13) +#define HPRT0_TSTCTL_SHIFT 13 +#define HPRT0_PWR (1 << 12) +#define HPRT0_PWR_SHIFT 12 +#define HPRT0_LNSTS_MASK (0x3 << 10) +#define HPRT0_LNSTS_SHIFT 10 +#define HPRT0_RST (1 << 8) +#define HPRT0_SUSP (1 << 7) +#define HPRT0_SUSP_SHIFT 7 +#define HPRT0_RES (1 << 6) +#define HPRT0_RES_SHIFT 6 +#define HPRT0_OVRCURRCHG (1 << 5) +#define HPRT0_OVRCURRACT (1 << 4) +#define HPRT0_ENACHG (1 << 3) +#define HPRT0_ENA (1 << 2) +#define HPRT0_CONNDET (1 << 1) +#define HPRT0_CONNSTS (1 << 0) + +#define HCCHAR(ch) HSOTG_REG(0x0500 + 0x20 * (ch)) + +#define HCCHAR_CHENA (1 << 31) +#define HCCHAR_CHDIS (1 << 30) +#define HCCHAR_ODDFRM (1 << 29) +#define HCCHAR_DEVADDR_MASK (0x7f << 22) +#define HCCHAR_DEVADDR_SHIFT 22 +#define HCCHAR_MULTICNT_MASK (0x3 << 20) +#define HCCHAR_MULTICNT_SHIFT 20 +#define HCCHAR_EPTYPE_MASK (0x3 << 18) +#define HCCHAR_EPTYPE_SHIFT 18 +#define HCCHAR_LSPDDEV (1 << 17) +#define HCCHAR_EPDIR (1 << 15) +#define HCCHAR_EPDIR_SHIFT 15 +#define HCCHAR_EPNUM_MASK (0xf << 11) +#define HCCHAR_EPNUM_SHIFT 11 +#define HCCHAR_MPS_MASK (0x7ff << 0) +#define HCCHAR_MPS_SHIFT 0 + +#define HCSPLT(ch) HSOTG_REG(0x0504 + 0x20 * (ch)) + +#define HCSPLT_SPLTENA (1 << 31) +#define HCSPLT_COMPSPLT (1 << 16) +#define HCSPLT_COMPSPLT_SHIFT 16 +#define HCSPLT_XACTPOS_MASK (0x3 << 14) +#define HCSPLT_XACTPOS_SHIFT 14 +#define HCSPLT_XACTPOS_MID (0 << 14) +#define HCSPLT_XACTPOS_END (1 << 14) +#define HCSPLT_XACTPOS_BEGIN (2 << 14) +#define HCSPLT_XACTPOS_ALL (3 << 14) +#define HCSPLT_HUBADDR_MASK (0x7f << 7) +#define HCSPLT_HUBADDR_SHIFT 7 +#define HCSPLT_PRTADDR_MASK (0x7f << 0) +#define HCSPLT_PRTADDR_SHIFT 0 + +#define HCINT(ch) HSOTG_REG(0x0508 + 0x20 * (ch)) +#define HCINTMSK(ch) HSOTG_REG(0x050c + 0x20 * (ch)) + +#define HCINTMSK_RESERVED14_31 (0x3ffff << 14) +#define HCINTMSK_FRM_LIST_ROLL (1 << 13) +#define HCINTMSK_XCS_XACT (1 << 12) +#define HCINTMSK_BNA (1 << 11) +#define HCINTMSK_DATATGLERR (1 << 10) +#define HCINTMSK_FRMOVRUN (1 << 9) +#define HCINTMSK_BBLERR (1 << 8) +#define HCINTMSK_XACTERR (1 << 7) +#define HCINTMSK_NYET (1 << 6) +#define HCINTMSK_ACK (1 << 5) +#define HCINTMSK_NAK (1 << 4) +#define HCINTMSK_STALL (1 << 3) +#define HCINTMSK_AHBERR (1 << 2) +#define HCINTMSK_CHHLTD (1 << 1) +#define HCINTMSK_XFERCOMPL (1 << 0) + +#define HCTSIZ(ch) HSOTG_REG(0x0510 + 0x20 * (ch)) + +#define TSIZ_DOPNG (1 << 31) +#define TSIZ_SC_MC_PID_MASK (0x3 << 29) +#define TSIZ_SC_MC_PID_SHIFT 29 +#define TSIZ_SC_MC_PID_DATA0 (0 << 29) +#define TSIZ_SC_MC_PID_DATA2 (1 << 29) +#define TSIZ_SC_MC_PID_DATA1 (2 << 29) +#define TSIZ_SC_MC_PID_MDATA (3 << 
29) +#define TSIZ_SC_MC_PID_SETUP (3 << 29) +#define TSIZ_PKTCNT_MASK (0x3ff << 19) +#define TSIZ_PKTCNT_SHIFT 19 +#define TSIZ_NTD_MASK (0xff << 8) +#define TSIZ_NTD_SHIFT 8 +#define TSIZ_SCHINFO_MASK (0xff << 0) +#define TSIZ_SCHINFO_SHIFT 0 +#define TSIZ_XFERSIZE_MASK (0x7ffff << 0) +#define TSIZ_XFERSIZE_SHIFT 0 + +#define HCDMA(ch) HSOTG_REG(0x0514 + 0x20 * (ch)) + +#define HCDMA_DMA_ADDR_MASK (0x1fffff << 11) +#define HCDMA_DMA_ADDR_SHIFT 11 +#define HCDMA_CTD_MASK (0xff << 3) +#define HCDMA_CTD_SHIFT 3 + +#define HCDMAB(ch) HSOTG_REG(0x051c + 0x20 * (ch)) + +#define HCFIFO(ch) HSOTG_REG(0x1000 + 0x1000 * (ch)) + +#define PCGCTL HSOTG_REG(0x0e00) + +#define PCGCTL_IF_DEV_MODE (1 << 31) +#define PCGCTL_P2HD_PRT_SPD_MASK (0x3 << 29) +#define PCGCTL_P2HD_PRT_SPD_SHIFT 29 +#define PCGCTL_P2HD_DEV_ENUM_SPD_MASK (0x3 << 27) +#define PCGCTL_P2HD_DEV_ENUM_SPD_SHIFT 27 +#define PCGCTL_MAC_DEV_ADDR_MASK (0x7f << 20) +#define PCGCTL_MAC_DEV_ADDR_SHIFT 20 +#define PCGCTL_MAX_TERMSEL (1 << 19) +#define PCGCTL_MAX_XCVRSELECT_MASK (0x3 << 17) +#define PCGCTL_MAX_XCVRSELECT_SHIFT 17 +#define PCGCTL_PORT_POWER (1 << 16) +#define PCGCTL_PRT_CLK_SEL_MASK (0x3 << 14) +#define PCGCTL_PRT_CLK_SEL_SHIFT 14 +#define PCGCTL_ESS_REG_RESTORED (1 << 13) +#define PCGCTL_EXTND_HIBER_SWITCH (1 << 12) +#define PCGCTL_EXTND_HIBER_PWRCLMP (1 << 11) +#define PCGCTL_ENBL_EXTND_HIBER (1 << 10) +#define PCGCTL_RESTOREMODE (1 << 9) +#define PCGCTL_RESETAFTSUSP (1 << 8) +#define PCGCTL_DEEP_SLEEP (1 << 7) +#define PCGCTL_PHY_IN_SLEEP (1 << 6) +#define PCGCTL_ENBL_SLEEP_GATING (1 << 5) +#define PCGCTL_RSTPDWNMODULE (1 << 3) +#define PCGCTL_PWRCLMP (1 << 2) +#define PCGCTL_GATEHCLK (1 << 1) +#define PCGCTL_STOPPCLK (1 << 0) + +/* + * Host-mode DMA Descriptor structure + * + * DMA Descriptor structure contains two quadlets: + * Status quadlet and Data buffer pointer. + */ +struct dwc2_host_dma_desc { + /* DMA Descriptor status quadlet */ + u32 status; + + /* DMA Descriptor data buffer pointer */ + u32 buf; +}; + +#define HOST_DMA_A (1 << 31) +#define HOST_DMA_STS_MASK (0x3 << 28) +#define HOST_DMA_STS_SHIFT 28 +#define HOST_DMA_STS_PKTERR (1 << 28) +#define HOST_DMA_EOL (1 << 26) +#define HOST_DMA_IOC (1 << 25) +#define HOST_DMA_SUP (1 << 24) +#define HOST_DMA_ALT_QTD (1 << 23) +#define HOST_DMA_QTD_OFFSET_MASK (0x3f << 17) +#define HOST_DMA_QTD_OFFSET_SHIFT 17 +#define HOST_DMA_ISOC_NBYTES_MASK (0xfff << 0) +#define HOST_DMA_ISOC_NBYTES_SHIFT 0 +#define HOST_DMA_NBYTES_MASK (0x1ffff << 0) +#define HOST_DMA_NBYTES_SHIFT 0 + +#define MAX_DMA_DESC_SIZE 131071 +#define MAX_DMA_DESC_NUM_GENERIC 64 +#define MAX_DMA_DESC_NUM_HS_ISOC 256 + +#define MAX_FRLIST_EN_NUM 64 + +#endif /* __DWC_HW_H__ */ -- 1.7.1 -- To unsubscribe from this list: send the line "unsubscribe linux-usb" in the body of a message to majordomo@xxxxxxxxxxxxxxx More majordomo info at http://vger.kernel.org/majordomo-info.html
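A short usage note for reviewers, not part of the patch: the *_MASK/*_SHIFT pairs in hw.h are meant to be combined in the usual field-extract pattern against the register offsets defined above. A minimal sketch, assuming the hcd->regs mapping used in core.c; the helper name below is purely illustrative:

static inline u32 dwc2_read_num_host_chan(struct dwc2_hcd *hcd)
{
	u32 hwcfg2 = readl(hcd->regs + GHWCFG2);

	/* Extract bits 17:14 of GHWCFG2 using the mask/shift pair */
	return (hwcfg2 & GHWCFG2_NUM_HOST_CHAN_MASK) >>
			GHWCFG2_NUM_HOST_CHAN_SHIFT;
}

The databook documents this particular field as the channel count minus one, so callers would normally add 1 to the returned value; the same extract pattern applies to the other configuration fields (GHWCFG4_NUM_IN_EPS, HCCHAR_MPS, and so on).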