Re: [PATCH 7/8] usb: cdnsp: cdns3 Add main part of Cadence USBSSP DRD Driver

On Mon, 2020-09-28 at 14:27 +0200, Pawel Laszczak wrote:
> This patch introduces the main part of the Cadence USBSSP DRD driver
> to the Linux kernel.
> To reduce the patch size a little bit, the header file gadget.h was
> intentionally added as a separate patch.
> 
> The Cadence USBSSP DRD Controller is a highly configurable IP Core which
> can be instantiated in Dual-Role Device (DRD), Peripheral-Only and
> Host-Only (XHCI) configurations.
> 
> The current driver has been validated on an FPGA platform. We have
> support for the PCIe bus, which is used for FPGA prototyping.
> 
> The host side of the USBSS DRD controller is compliant with XHCI.
> The architecture of the device side is almost the same as the host side,
> and most of the XHCI specification can be used to understand how
> this controller operates.
> 
> Signed-off-by: Pawel Laszczak <pawell@xxxxxxxxxxx>
> ---
>  drivers/usb/Kconfig               |    1 +
>  drivers/usb/Makefile              |    1 +
>  drivers/usb/cdns3/core.c          |   19 +-
>  drivers/usb/cdns3/drd.c           |   28 +
>  drivers/usb/cdns3/drd.h           |    2 +
>  drivers/usb/cdns3/gadget-export.h |   18 +-
>  drivers/usb/cdns3/host-export.h   |    4 +-
>  drivers/usb/cdnsp/Kconfig         |   40 +
>  drivers/usb/cdnsp/Makefile        |    7 +
>  drivers/usb/cdnsp/cdnsp-pci.c     |  247 +++
>  drivers/usb/cdnsp/ep0.c           |  480 ++++++
>  drivers/usb/cdnsp/gadget.c        | 1946 ++++++++++++++++++++++++
>  drivers/usb/cdnsp/gadget.h        |  139 ++
>  drivers/usb/cdnsp/mem.c           | 1312 ++++++++++++++++
>  drivers/usb/cdnsp/ring.c          | 2363 +++++++++++++++++++++++++++++
>  15 files changed, 6600 insertions(+), 7 deletions(-)
>  create mode 100644 drivers/usb/cdnsp/Kconfig
>  create mode 100644 drivers/usb/cdnsp/Makefile
>  create mode 100644 drivers/usb/cdnsp/cdnsp-pci.c
>  create mode 100644 drivers/usb/cdnsp/ep0.c
>  create mode 100644 drivers/usb/cdnsp/gadget.c
>  create mode 100644 drivers/usb/cdnsp/mem.c
>  create mode 100644 drivers/usb/cdnsp/ring.c
> 
> diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
> index 26475b409b53..555c4a4cb465 100644
> --- a/drivers/usb/Kconfig
> +++ b/drivers/usb/Kconfig
> @@ -112,6 +112,7 @@ source "drivers/usb/usbip/Kconfig"
>  endif
>  
>  source "drivers/usb/cdns3/Kconfig"
> +source "drivers/usb/cdnsp/Kconfig"
>  
>  source "drivers/usb/mtu3/Kconfig"
>  
> diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
> index 1c1c1d659394..84727f7a4b92 100644
> --- a/drivers/usb/Makefile
> +++ b/drivers/usb/Makefile
> @@ -14,6 +14,7 @@ obj-$(CONFIG_USB_DWC2)		+= dwc2/
>  obj-$(CONFIG_USB_ISP1760)	+= isp1760/
>  
>  obj-$(CONFIG_USB_CDNS3)		+= cdns3/
> +obj-$(CONFIG_USB_CDNSP)		+= cdnsp/
>  
>  obj-$(CONFIG_USB_MON)		+= mon/
>  obj-$(CONFIG_USB_MTU3)		+= mtu3/
> diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c
> index 2af99294beaa..560783092d8a 100644
> --- a/drivers/usb/cdns3/core.c
> +++ b/drivers/usb/cdns3/core.c
> @@ -138,7 +138,14 @@ static int cdns_core_init_role(struct cdns *cdns)
>  	dr_mode = best_dr_mode;
>  
>  	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
> -		ret = cdns_host_init(cdns);
> +		if ((cdns->version == CDNSP_CONTROLLER_V2 &&
> +		     IS_ENABLED(CONFIG_USB_CDNSP_HOST)) ||
> +		    (cdns->version < CDNSP_CONTROLLER_V2 &&
> +		     IS_ENABLED(CONFIG_USB_CDNS3_HOST)))
> +			ret = cdns_host_init(cdns);
> +		else
> +			ret = -ENXIO;
> +
>  		if (ret) {
>  			dev_err(dev, "Host initialization failed with %d\n",
>  				ret);
> @@ -147,7 +154,15 @@ static int cdns_core_init_role(struct cdns *cdns)
>  	}
>  
>  	if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
> -		ret = cdns3_gadget_init(cdns);
> +		if (cdns->version == CDNSP_CONTROLLER_V2 &&
> +		    IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
> +			ret = cdnsp_gadget_init(cdns);
> +		else if (cdns->version < CDNSP_CONTROLLER_V2 &&
> +			 IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
> +			ret = cdns3_gadget_init(cdns);
> +		else
> +			ret = -ENXIO;
> +
>  		if (ret) {
>  			dev_err(dev, "Device initialization failed with %d\n",
>  				ret);
> diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c
> index 7feb622972da..3c732e19c61c 100644
> --- a/drivers/usb/cdns3/drd.c
> +++ b/drivers/usb/cdns3/drd.c
> @@ -90,6 +90,32 @@ int cdns_get_vbus(struct cdns *cdns)
>  	return vbus;
>  }
>  
> +void cdns_clear_vbus(struct cdns *cdns)
> +{
> +	u32 reg;
> +
> +	if (cdns->version != CDNSP_CONTROLLER_V2)
> +		return;
> +
> +	reg = readl(&cdns->otg_cdnsp_regs->override);
> +	reg |= OVERRIDE_SESS_VLD_SEL;
> +	writel(reg, &cdns->otg_cdnsp_regs->override);
> +}
> +EXPORT_SYMBOL_GPL(cdns_clear_vbus);
> +
> +void cdns_set_vbus(struct cdns *cdns)
> +{
> +	u32 reg;
> +
> +	if (cdns->version != CDNSP_CONTROLLER_V2)
> +		return;
> +
> +	reg = readl(&cdns->otg_cdnsp_regs->override);
> +	reg &= ~OVERRIDE_SESS_VLD_SEL;
> +	writel(reg, &cdns->otg_cdnsp_regs->override);
Is this used to force the vbus-valid signal to be always valid? If it is,
is there any issue when working in device-only mode?

> +}
> +EXPORT_SYMBOL_GPL(cdns_set_vbus);
> +
>  bool cdns_is_host(struct cdns *cdns)
>  {
>  	if (cdns->dr_mode == USB_DR_MODE_HOST)
> @@ -431,5 +457,7 @@ int cdns_drd_init(struct cdns *cdns)
>  int cdns_drd_exit(struct cdns *cdns)
>  {
>  	cdns_otg_disable_irq(cdns);
> +	devm_free_irq(cdns->dev, cdns->otg_irq, cdns);
> +
>  	return 0;
>  }
> diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h
> index b92e2834dc3f..7ef14bef047c 100644
> --- a/drivers/usb/cdns3/drd.h
> +++ b/drivers/usb/cdns3/drd.h
> @@ -204,6 +204,8 @@ bool cdns_is_host(struct cdns *cdns);
>  bool cdns_is_device(struct cdns *cdns);
>  int cdns_get_id(struct cdns *cdns);
>  int cdns_get_vbus(struct cdns *cdns);
> +extern void cdns_clear_vbus(struct cdns *cdns);
> +extern void cdns_set_vbus(struct cdns *cdns);
>  int cdns_drd_init(struct cdns *cdns);
>  int cdns_drd_exit(struct cdns *cdns);
>  int cdns_drd_update_mode(struct cdns *cdns);
> diff --git a/drivers/usb/cdns3/gadget-export.h b/drivers/usb/cdns3/gadget-export.h
> index e784584fe053..b7eec9fb8fda 100644
> --- a/drivers/usb/cdns3/gadget-export.h
> +++ b/drivers/usb/cdns3/gadget-export.h
> @@ -1,6 +1,6 @@
>  /* SPDX-License-Identifier: GPL-2.0 */
>  /*
> - * Cadence USBSS DRD Driver - Gadget Export APIs.
> + * Cadence USBSS and USBSSP DRD Driver - Gadget Export APIs.
>   *
>   * Copyright (C) 2017 NXP
>   * Copyright (C) 2017-2018 NXP
> @@ -10,7 +10,19 @@
>  #ifndef __LINUX_CDNS3_GADGET_EXPORT
>  #define __LINUX_CDNS3_GADGET_EXPORT
>  
> -#ifdef CONFIG_USB_CDNS3_GADGET
> +#if IS_ENABLED(CONFIG_USB_CDNSP_GADGET)
> +
> +extern int cdnsp_gadget_init(struct cdns *cdns);
> +#else
> +
> +static inline int cdnsp_gadget_init(struct cdns *cdns)
> +{
> +	return -ENXIO;
> +}
> +
> +#endif  /* CONFIG_USB_CDNSP_GADGET */
> +
> +#if IS_ENABLED(CONFIG_USB_CDNS3_GADGET)
>  
>  extern int cdns3_gadget_init(struct cdns *cdns);
>  void cdns3_gadget_exit(struct cdns *cdns);
> @@ -23,6 +35,6 @@ static inline int cdns3_gadget_init(struct cdns *cdns)
>  
>  static inline void cdns3_gadget_exit(struct cdns *cdns) { }
>  
> -#endif
> +#endif /* CONFIG_USB_CDNS3_GADGET */
>  
>  #endif /* __LINUX_CDNS3_GADGET_EXPORT */
> diff --git a/drivers/usb/cdns3/host-export.h b/drivers/usb/cdns3/host-export.h
> index d82b83d070ad..41f7ea1fed29 100644
> --- a/drivers/usb/cdns3/host-export.h
> +++ b/drivers/usb/cdns3/host-export.h
> @@ -9,7 +9,7 @@
>  #ifndef __LINUX_CDNS3_HOST_EXPORT
>  #define __LINUX_CDNS3_HOST_EXPORT
>  
> -#ifdef CONFIG_USB_CDNS3_HOST
> +#if  IS_ENABLED(CONFIG_USB_CDNS3_HOST) || IS_ENABLED(CONFIG_USB_CDNSP_GADGET)
>  
>  int cdns_host_init(struct cdns *cdns);
>  
> @@ -22,6 +22,6 @@ static inline int cdns_host_init(struct cdns *cdns)
>  
>  static inline void cdns_host_exit(struct cdns *cdns) { }
>  
> -#endif /* CONFIG_USB_CDNS3_HOST */
> +#endif /* CONFIG_USB_CDNS3_HOST || CONFIG_USB_CDNSP_GADGET */
>  
>  #endif /* __LINUX_CDNS3_HOST_EXPORT */
> diff --git a/drivers/usb/cdnsp/Kconfig b/drivers/usb/cdnsp/Kconfig
> new file mode 100644
> index 000000000000..56cee5f6dfb4
> --- /dev/null
> +++ b/drivers/usb/cdnsp/Kconfig
> @@ -0,0 +1,40 @@
> +config USB_CDNSP_PCI
> +	tristate "Cadence CDNSP Dual-Role Controller"
> +	depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA && USB_PCI && ACPI
> +	select USB_XHCI_PLATFORM if USB_XHCI_HCD
> +	select USB_ROLE_SWITCH
> +	select CDNS_USB_COMMON
> +	help
> +	  Say Y here if your system has a Cadence CDNSP dual-role controller.
> +	  It supports: dual-role switch, Host-only, and Peripheral-only.
> +
> +	  If you choose to build this driver as a dynamically linked
> +	  module, the module will be called cdnsp.ko.
> +
> +if USB_CDNSP_PCI
> +
> +config USB_CDNSP_GADGET
> +	bool "Cadence CDNSP device controller"
> +	depends on USB_GADGET=y || USB_GADGET=USB_CDNSP_PCI
> +	help
> +	  Say Y here to enable device controller functionality of the
> +	  Cadence CDNSP-DEV driver.
> +
> +	  The Cadence CDNSP Device Controller in device mode is
> +	  very similar to an XHCI controller, therefore some of the
> +	  algorithms used have been taken from the host driver.
> +	  This controller supports FS, HS, SS and SSP modes.
> +	  It doesn't support LS.
> +
> +config USB_CDNSP_HOST
> +	bool "Cadence CDNSP host controller"
> +	depends on USB=y || USB=USB_CDNSP_PCI
> +	select CDNS_USB_HOST
> +	help
> +	  Say Y here to enable host controller functionality of the
> +	  Cadence driver.
> +
> +	  The host controller is compliant with XHCI, so it uses the
> +	  standard XHCI driver.
> +
> +endif
> diff --git a/drivers/usb/cdnsp/Makefile b/drivers/usb/cdnsp/Makefile
> new file mode 100644
> index 000000000000..53202b21a8d2
> --- /dev/null
> +++ b/drivers/usb/cdnsp/Makefile
> @@ -0,0 +1,7 @@
> +# SPDX-License-Identifier: GPL-2.0
> +
> +cdnsp-udc-pci-y					:= cdnsp-pci.o
> +
> +obj-$(CONFIG_USB_CDNSP_PCI) 			+= cdnsp-udc-pci.o
> +cdnsp-udc-pci-$(CONFIG_USB_CDNSP_GADGET)	+= ring.o gadget.o mem.o ep0.o
> +
> diff --git a/drivers/usb/cdnsp/cdnsp-pci.c b/drivers/usb/cdnsp/cdnsp-pci.c
> new file mode 100644
> index 000000000000..f67ee8effcd3
> --- /dev/null
> +++ b/drivers/usb/cdnsp/cdnsp-pci.c
> @@ -0,0 +1,247 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Cadence PCI Glue driver.
> + *
> + * Copyright (C) 2019 Cadence.
> + *
> + * Author: Pawel Laszczak <pawell@xxxxxxxxxxx>
> + *
> + */
> +
> +#include <linux/platform_device.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/kernel.h>
> +#include <linux/module.h>
> +#include <linux/slab.h>
> +#include <linux/pci.h>
> +
> +#include "../cdns3/core.h"
> +
> +#define PCI_BAR_HOST		0
> +#define PCI_BAR_OTG		0
> +#define PCI_BAR_DEV		2
> +
> +#define PCI_DEV_FN_HOST_DEVICE	0
> +#define PCI_DEV_FN_OTG		1
> +
> +#define PCI_DRIVER_NAME		"cdns-pci-usbssp"
> +#define PLAT_DRIVER_NAME	"cdns-usbssp"
> +
> +#define CDNS_VENDOR_ID		0x17cd
> +#define CDNS_DEVICE_ID		0x0100
> +#define CDNS_DRD_IF		(PCI_CLASS_SERIAL_USB << 8 | 0x80)
> +
> +static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
> +{
> +	struct pci_dev *func;
> +
> +	/*
> +	 * Gets the second function.
> +	 * It's a little tricky, but this platform has two functions.
> +	 * The first keeps resources for Host/Device while the second
> +	 * keeps resources for DRD/OTG.
> +	 */
> +	func = pci_get_device(pdev->vendor, pdev->device, NULL);
> +	if (!func)
> +		return NULL;
> +
> +	if (func->devfn == pdev->devfn) {
> +		func = pci_get_device(pdev->vendor, pdev->device, func);
> +		if (!func)
> +			return NULL;
> +	}
> +
> +	return func;
> +}
> +
> +static int cdnsp_pci_probe(struct pci_dev *pdev,
> +			   const struct pci_device_id *id)
> +{
> +	struct device *dev = &pdev->dev;
> +	struct pci_dev *func;
> +	struct resource *res;
> +	struct cdns *cdnsp;
> +	int ret;
> +
> +	/*
> +	 * For GADGET/HOST PCI (devfn) function number is 0,
> +	 * for OTG PCI (devfn) function number is 1.
> +	 */
> +	if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
> +		    pdev->devfn != PCI_DEV_FN_OTG))
> +		return -EINVAL;
> +
> +	func = cdnsp_get_second_fun(pdev);
> +	if (!func)
> +		return -EINVAL;
> +
> +	if (func->class == PCI_CLASS_SERIAL_USB_XHCI ||
> +	    pdev->class == PCI_CLASS_SERIAL_USB_XHCI) {
> +		ret = -EINVAL;
> +		goto put_pci;
> +	}
> +
> +	ret = pcim_enable_device(pdev);
> +	if (ret) {
> +		dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
> +		goto put_pci;
> +	}
> +
> +	pci_set_master(pdev);
> +	if (pci_is_enabled(func)) {
> +		cdnsp = pci_get_drvdata(func);
> +	} else {
> +		cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
> +		if (!cdnsp) {
> +			ret = -ENOMEM;
> +			goto disable_pci;
> +		}
> +	}
> +
> +	/* For GADGET device function number is 0. */
> +	if (pdev->devfn == 0) {
> +		resource_size_t rsrc_start, rsrc_len;
> +
> +		/* Function 0: host(BAR_0) + device(BAR_1).*/
> +		dev_dbg(dev, "Initialize resources\n");
> +		rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
> +		rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);
> +		res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
> +		if (!res) {
> +			dev_dbg(dev, "controller already in use\n");
> +			ret = -EBUSY;
> +			goto free_cdnsp;
> +		}
> +
> +		cdnsp->dev_regs = devm_ioremap(dev, rsrc_start, rsrc_len);
> +		if (!cdnsp->dev_regs) {
> +			dev_dbg(dev, "error mapping memory\n");
> +			ret = -EFAULT;
> +			goto free_cdnsp;
> +		}
> +
> +		cdnsp->dev_irq = pdev->irq;
> +		dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
> +			&rsrc_start);
> +
> +		res = &cdnsp->xhci_res[0];
> +		res->start = pci_resource_start(pdev, PCI_BAR_HOST);
> +		res->end = pci_resource_end(pdev, PCI_BAR_HOST);
> +		res->name = "xhci";
> +		res->flags = IORESOURCE_MEM;
> +		dev_dbg(dev, "USBSS-XHCI physical base addr: %pa\n",
> +			&res->start);
> +
> +		/* Interrupt for XHCI, */
> +		res = &cdnsp->xhci_res[1];
> +		res->start = pdev->irq;
> +		res->name = "host";
> +		res->flags = IORESOURCE_IRQ;
> +	} else {
> +		res = &cdnsp->otg_res;
> +		res->start = pci_resource_start(pdev, PCI_BAR_OTG);
> +		res->end =   pci_resource_end(pdev, PCI_BAR_OTG);
> +		res->name = "otg";
> +		res->flags = IORESOURCE_MEM;
> +		dev_dbg(dev, "CDNSP-DRD physical base addr: %pa\n",
> +			&res->start);
> +
> +		/* Interrupt for OTG/DRD. */
> +		cdnsp->otg_irq = pdev->irq;
> +	}
> +
> +	if (pci_is_enabled(func)) {
> +		cdnsp->dev = dev;
> +
> +		ret = cdns_init(cdnsp);
> +		if (ret)
> +			goto free_cdnsp;
> +	}
> +
> +	pci_set_drvdata(pdev, cdnsp);
> +
> +	device_wakeup_enable(&pdev->dev);
> +	if (pci_dev_run_wake(pdev))
> +		pm_runtime_put_noidle(&pdev->dev);
> +
> +	return 0;
> +
> +free_cdnsp:
> +	if (!pci_is_enabled(func))
> +		kfree(cdnsp);
> +
> +disable_pci:
> +	pci_disable_device(pdev);
> +
> +put_pci:
> +	pci_dev_put(func);
> +
> +	return ret;
> +}
> +
> +static void cdnsp_pci_remove(struct pci_dev *pdev)
> +{
> +	struct cdns *cdnsp;
> +	struct pci_dev *func;
> +
> +	func = cdnsp_get_second_fun(pdev);
> +	cdnsp = (struct cdns *)pci_get_drvdata(pdev);
> +
> +	if (pci_dev_run_wake(pdev))
> +		pm_runtime_get_noresume(&pdev->dev);
> +
> +	if (!pci_is_enabled(func)) {
> +		kfree(cdnsp);
> +		goto pci_put;
> +	}
> +
> +	cdns_remove(cdnsp);
> +
> +pci_put:
> +	pci_dev_put(func);
> +}
> +
> +static int __maybe_unused cdnsp_pci_suspend(struct device *dev)
> +{
> +	struct cdns *cdns = dev_get_drvdata(dev);
> +
> +	return cdns_suspend(cdns);
> +}
> +
> +static int __maybe_unused cdnsp_pci_resume(struct device *dev)
> +{
> +	struct cdns *cdns = dev_get_drvdata(dev);
> +
> +	return cdns_resume(cdns);
> +}
> +
> +static const struct dev_pm_ops cdnsp_pci_pm_ops = {
> +	SET_SYSTEM_SLEEP_PM_OPS(cdnsp_pci_suspend, cdnsp_pci_resume)
> +};
> +
> +static const struct pci_device_id cdnsp_pci_ids[] = {
> +	{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
> +	  PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
> +	{ PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
> +	  CDNS_DRD_IF, PCI_ANY_ID },
> +	{ 0, }
> +};
> +
> +static struct pci_driver cdnsp_pci_driver = {
> +	.name = "cdnsp-pci",
> +	.id_table = &cdnsp_pci_ids[0],
> +	.probe = cdnsp_pci_probe,
> +	.remove = cdnsp_pci_remove,
> +	.driver = {
> +		.pm = &cdnsp_pci_pm_ops,
> +	}
> +};
> +
> +module_pci_driver(cdnsp_pci_driver);
> +MODULE_DEVICE_TABLE(pci, cdnsp_pci_ids);
> +
> +MODULE_ALIAS("pci:cdnsp");
> +MODULE_AUTHOR("Pawel Laszczak <pawell@xxxxxxxxxxx>");
> +MODULE_LICENSE("GPL v2");
> +MODULE_DESCRIPTION("Cadence CDNSP PCI driver");
> +
> diff --git a/drivers/usb/cdnsp/ep0.c b/drivers/usb/cdnsp/ep0.c
> new file mode 100644
> index 000000000000..7f6e1d28d3b8
> --- /dev/null
> +++ b/drivers/usb/cdnsp/ep0.c
> @@ -0,0 +1,480 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Cadence CDNSP DRD Driver.
> + *
> + * Copyright (C) 2020 Cadence.
> + *
> + * Author: Pawel Laszczak <pawell@xxxxxxxxxxx>
> + *
> + */
> +
> +#include <linux/usb/composite.h>
> +#include <linux/usb/gadget.h>
> +#include <linux/list.h>
> +
> +#include "gadget.h"
> +
> +static void cdnsp_ep0_stall(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_request *preq;
> +	struct cdnsp_ep *pep;
> +
> +	pep = &pdev->eps[0];
> +	preq = next_request(&pep->pending_list);
> +
> +	if (pdev->three_stage_setup) {
> +		cdnsp_halt_endpoint(pdev, pep, true);
> +
> +		if (preq)
> +			cdnsp_gadget_giveback(pep, preq, -ECONNRESET);
> +	} else {
> +		pep->ep_state |= EP0_HALTED_STATUS;
> +
> +		if (preq)
> +			list_del(&preq->list);
> +
> +		cdnsp_status_stage(pdev);
> +	}
> +}
> +
> +static int cdnsp_ep0_delegate_req(struct cdnsp_device *pdev,
> +				  struct usb_ctrlrequest *ctrl)
> +{
> +	int ret;
> +
> +	spin_unlock(&pdev->lock);
> +	ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl);
> +	spin_lock(&pdev->lock);
> +
> +	return ret;
> +}
> +
> +static int cdnsp_ep0_set_config(struct cdnsp_device *pdev,
> +				struct usb_ctrlrequest *ctrl)
> +{
> +	enum usb_device_state state = pdev->gadget.state;
> +	u32 cfg;
> +	int ret;
> +
> +	cfg = le16_to_cpu(ctrl->wValue);
> +
> +	switch (state) {
> +	case USB_STATE_ADDRESS:
> +		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
> +		if (ret)
> +			return ret;
> +		break;
> +	case USB_STATE_CONFIGURED:
> +		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
> +		if (ret)
> +			return ret;
What about moving cdnsp_ep0_delegate_req() after the switch ()? It's the
same for the ADDRESS & CONFIGURED states.
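E.g. something like this (just an untested sketch, behaviour intended to
stay the same):

static int cdnsp_ep0_set_config(struct cdnsp_device *pdev,
				struct usb_ctrlrequest *ctrl)
{
	enum usb_device_state state = pdev->gadget.state;
	u32 cfg;
	int ret;

	cfg = le16_to_cpu(ctrl->wValue);

	switch (state) {
	case USB_STATE_ADDRESS:
	case USB_STATE_CONFIGURED:
		break;
	default:
		dev_err(pdev->dev, "Set Configuration - bad device state\n");
		return -EINVAL;
	}

	/* Same delegation for both the ADDRESS and CONFIGURED states. */
	ret = cdnsp_ep0_delegate_req(pdev, ctrl);
	if (ret)
		return ret;

	if (!cfg)
		usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);

	return 0;
}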
> +		break;
> +	default:
> +		dev_err(pdev->dev, "Set Configuration - bad device state\n");
> +		return -EINVAL;
> +	}
> +
> +	if (!cfg)
> +		usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
> +
> +	return 0;
> +}
> +
> +static int cdnsp_ep0_set_address(struct cdnsp_device *pdev,
> +				 struct usb_ctrlrequest *ctrl)
> +{
> +	enum usb_device_state state = pdev->gadget.state;
> +	struct cdnsp_slot_ctx *slot_ctx;
> +	unsigned int slot_state;
> +	int ret;
> +	u32 addr;
> +
> +	addr = le16_to_cpu(ctrl->wValue);
> +
> +	if (addr > 127) {
> +		dev_err(pdev->dev, "Invalid device address %d\n", addr);
> +		return -EINVAL;
> +	}
> +
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
> +
> +	if (state == USB_STATE_CONFIGURED) {
> +		dev_err(pdev->dev, "Can't Set Address from Configured State\n");
> +		return -EINVAL;
> +	}
> +
> +	pdev->device_address = le16_to_cpu(ctrl->wValue);
> +
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
> +	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
> +	if (slot_state == SLOT_STATE_ADDRESSED)
> +		cdnsp_reset_device(pdev);
> +
> +	/*set device address*/
> +	ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS);
> +	if (ret)
> +		return ret;
> +
> +	if (addr)
> +		usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
> +	else
> +		usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
> +
> +	return 0;
> +}
> +
> +int cdnsp_status_stage(struct cdnsp_device *pdev)
> +{
> +	pdev->ep0_stage = CDNSP_STATUS_STAGE;
> +	pdev->ep0_preq.request.length = 0;
> +
> +	return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
> +}
> +
> +static int cdnsp_w_index_to_ep_index(__le32  wIndex)
> +{
> +	wIndex = le32_to_cpu(wIndex);
> +
> +	if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
> +		return 0;
> +
> +	return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) +
> +		(wIndex & USB_ENDPOINT_DIR_MASK ? 1 : 0) - 1;
> +}
> +
> +static int cdnsp_ep0_handle_status(struct cdnsp_device *pdev,
> +				   struct usb_ctrlrequest *ctrl)
> +{
> +	struct cdnsp_ep *pep;
> +	__le16 *response;
> +	int ep_sts = 0;
> +	u16 status = 0;
> +	u32 recipient;
> +
> +	recipient = ctrl->bRequestType & USB_RECIP_MASK;
> +
> +	switch (recipient) {
> +	case USB_RECIP_DEVICE:
> +		status = pdev->gadget.is_selfpowered;
> +		status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
> +
> +		if (pdev->gadget.speed >= USB_SPEED_SUPER) {
> +			status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED;
> +			status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED;
> +		}
> +		break;
> +	case USB_RECIP_INTERFACE:
> +		/*
> +		 * Function Remote Wake Capable	D0
> +		 * Function Remote Wakeup	D1
> +		 */
> +		return cdnsp_ep0_delegate_req(pdev, ctrl);
> +	case USB_RECIP_ENDPOINT:
> +		pep = &pdev->eps[cdnsp_w_index_to_ep_index(ctrl->wIndex)];
> +		ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
> +
> +		/* check if endpoint is stalled */
> +		if (ep_sts == EP_STATE_HALTED)
> +			status =  BIT(USB_ENDPOINT_HALT);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	response = (__le16 *)pdev->setup_buf;
> +	*response = cpu_to_le16(status);
> +
> +	pdev->ep0_preq.request.length = sizeof(*response);
> +	pdev->ep0_preq.request.buf = pdev->setup_buf;
> +
> +	return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
> +}
> +
> +static void cdnsp_enter_test_mode(struct cdnsp_device *pdev)
> +{
> +	u32 temp;
> +
> +	temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28);
> +	temp |= PORT_TEST_MODE(pdev->test_mode);
> +	writel(temp, &pdev->active_port->regs->portpmsc);
> +	pdev->test_mode = 0;
> +}
> +
> +static int cdnsp_ep0_handle_feature_device(struct cdnsp_device *pdev,
> +					   struct usb_ctrlrequest *ctrl,
> +					   int set)
> +{
> +	enum usb_device_state state;
> +	enum usb_device_speed speed;
> +	u16 tmode;
> +
> +	state = pdev->gadget.state;
> +	speed = pdev->gadget.speed;
> +
> +	switch (le16_to_cpu(ctrl->wValue)) {
> +	case USB_DEVICE_REMOTE_WAKEUP:
> +		pdev->may_wakeup = !!set;
> +		break;
> +	case USB_DEVICE_U1_ENABLE:
> +		if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
> +			return -EINVAL;
> +
> +		pdev->u1_allowed = !!set;
> +		break;
> +	case USB_DEVICE_U2_ENABLE:
> +		if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
> +			return -EINVAL;
> +
> +		pdev->u2_allowed = !!set;
> +		break;
> +	case USB_DEVICE_LTM_ENABLE:
> +		return -EINVAL;
> +	case USB_DEVICE_TEST_MODE:
> +		if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
> +			return -EINVAL;
> +
> +		tmode = le16_to_cpu(ctrl->wIndex);
> +
> +		if (!set || (tmode & 0xff) != 0)
> +			return -EINVAL;
> +
> +		tmode = tmode >> 8;
> +
> +		if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J)
> +			return -EINVAL;
> +
> +		pdev->test_mode = tmode;
> +
> +		/*
> +		 * Test mode must be set before Status Stage but controller
> +		 * will start testing sequence after Status Stage.
> +		 */
> +		cdnsp_enter_test_mode(pdev);
> +		break;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int cdnsp_ep0_handle_feature_intf(struct cdnsp_device *pdev,
> +					 struct usb_ctrlrequest *ctrl,
> +					 int set)
> +{
> +	u16 wValue, wIndex;
> +	int ret;
> +
> +	wValue = le16_to_cpu(ctrl->wValue);
> +	wIndex = le16_to_cpu(ctrl->wIndex);
> +
> +	switch (wValue) {
> +	case USB_INTRF_FUNC_SUSPEND:
> +		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
> +		if (ret)
> +			return ret;
> +
> +		/*
> +		 * Remote wakeup is enabled when any function within a device
> +		 * is enabled for function remote wakeup.
> +		 */
> +		if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
> +			pdev->may_wakeup++;
> +		else
> +			if (pdev->may_wakeup > 0)
> +				pdev->may_wakeup--;
> +
> +		return 0;
> +	default:
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int cdnsp_ep0_handle_feature_endpoint(struct cdnsp_device *pdev,
> +					     struct usb_ctrlrequest *ctrl,
> +					     int set)
> +{
> +	struct cdnsp_ep *pep;
> +	u32 wValue;
> +
> +	wValue = le16_to_cpu(ctrl->wValue);
> +	pep = &pdev->eps[cdnsp_w_index_to_ep_index(ctrl->wIndex)];
> +
> +	switch (wValue) {
> +	case USB_ENDPOINT_HALT:
> +		if (!set && (pep->ep_state & EP_WEDGE)) {
> +			/* Resets Sequence Number */
> +			cdnsp_halt_endpoint(pdev, pep, 0);
> +			cdnsp_halt_endpoint(pdev, pep, 1);
> +			break;
> +		}
> +
> +		return cdnsp_halt_endpoint(pdev, pep, set);
> +	default:
> +		dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
> +static int cdnsp_ep0_handle_feature(struct cdnsp_device *pdev,
> +				    struct usb_ctrlrequest *ctrl,
> +				    int set)
> +{
> +	switch (ctrl->bRequestType & USB_RECIP_MASK) {
> +	case USB_RECIP_DEVICE:
> +		return cdnsp_ep0_handle_feature_device(pdev, ctrl, set);
> +	case USB_RECIP_INTERFACE:
> +		return cdnsp_ep0_handle_feature_intf(pdev, ctrl, set);
> +	case USB_RECIP_ENDPOINT:
> +		return cdnsp_ep0_handle_feature_endpoint(pdev, ctrl, set);
> +	default:
> +		return -EINVAL;
> +	}
> +}
> +
> +static int cdnsp_ep0_set_sel(struct cdnsp_device *pdev,
> +			     struct usb_ctrlrequest *ctrl)
> +{
> +	enum usb_device_state state = pdev->gadget.state;
> +	u16 wLength;
> +
> +	if (state == USB_STATE_DEFAULT)
> +		return -EINVAL;
> +
> +	wLength = le16_to_cpu(ctrl->wLength);
> +
> +	if (wLength != 6) {
> +		dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n",
> +			wLength);
> +		return -EINVAL;
> +	}
> +
> +	/*
> +	 * To handle Set SEL we need to receive 6 bytes from Host. So let's
> +	 * queue a usb_request for 6 bytes.
> +	 */
> +	pdev->ep0_preq.request.length = 6;
> +	pdev->ep0_preq.request.buf = pdev->setup_buf;
> +
> +	return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
> +}
> +
> +static int cdnsp_ep0_set_isoch_delay(struct cdnsp_device *pdev,
> +				     struct usb_ctrlrequest *ctrl)
> +{
> +	if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength))
> +		return -EINVAL;
> +
> +	pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue);
> +
> +	return 0;
> +}
> +
> +static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
> +				 struct usb_ctrlrequest *ctrl)
> +{
> +	int ret;
> +
> +	switch (ctrl->bRequest) {
> +	case USB_REQ_GET_STATUS:
> +		ret = cdnsp_ep0_handle_status(pdev, ctrl);
> +		break;
> +	case USB_REQ_CLEAR_FEATURE:
> +		ret = cdnsp_ep0_handle_feature(pdev, ctrl, 0);
> +		break;
> +	case USB_REQ_SET_FEATURE:
> +		ret = cdnsp_ep0_handle_feature(pdev, ctrl, 1);
> +		break;
> +	case USB_REQ_SET_ADDRESS:
> +		ret = cdnsp_ep0_set_address(pdev, ctrl);
> +		break;
> +	case USB_REQ_SET_CONFIGURATION:
> +		ret = cdnsp_ep0_set_config(pdev, ctrl);
> +		break;
> +	case USB_REQ_SET_SEL:
> +		ret = cdnsp_ep0_set_sel(pdev, ctrl);
> +		break;
> +	case USB_REQ_SET_ISOCH_DELAY:
> +		ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
> +		break;
> +	case USB_REQ_SET_INTERFACE:
> +		/*
> +		 * Add request into pending list to block sending status stage
> +		 * by libcomposite.
> +		 */
> +		list_add_tail(&pdev->ep0_preq.list,
> +			      &pdev->ep0_preq.pep->pending_list);
> +
> +		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
> +		if (ret == -EBUSY)
> +			ret = 0;
> +
> +		list_del(&pdev->ep0_preq.list);
> +		break;
> +	default:
> +		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
> +		break;
> +	}
> +
> +	return ret;
> +}
> +
> +void cdnsp_setup_analyze(struct cdnsp_device *pdev)
> +{
> +	struct usb_ctrlrequest *ctrl = &pdev->setup;
> +	int ret = 0;
> +	__le16 len;
> +
> +	if (!pdev->gadget_driver)
> +		goto out;
> +
> +	if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
> +		dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
> +		ret = -EINVAL;
> +		goto out;
> +	}
> +
> +	/* Restore the ep0 to Stopped/Running state. */
> +	if (pdev->eps[0].ep_state & EP_HALTED)
> +		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
> +
> +	/*
> +	 * Finishing previous SETUP transfer by removing request from
> +	 * list and informing upper layer
> +	 */
> +	if (!list_empty(&pdev->eps[0].pending_list)) {
> +		struct cdnsp_request	*req;
> +
> +		req = next_request(&pdev->eps[0].pending_list);
> +		cdnsp_ep_dequeue(&pdev->eps[0], req);
> +	}
> +
> +	len = le16_to_cpu(ctrl->wLength);
> +	if (!len) {
> +		pdev->three_stage_setup = false;
> +		pdev->ep0_expect_in = false;
> +	} else {
> +		pdev->three_stage_setup = true;
> +		pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
> +	}
> +
> +	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
> +		ret = cdnsp_ep0_std_request(pdev, ctrl);
> +	else
> +		ret = cdnsp_ep0_delegate_req(pdev, ctrl);
> +
> +	if (!len)
> +		pdev->ep0_stage = CDNSP_STATUS_STAGE;
> +
> +	if (ret == USB_GADGET_DELAYED_STATUS)
> +		return;
> +out:
> +	if (ret < 0)
> +		cdnsp_ep0_stall(pdev);
> +	else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
> +		cdnsp_status_stage(pdev);
> +}
> diff --git a/drivers/usb/cdnsp/gadget.c b/drivers/usb/cdnsp/gadget.c
> new file mode 100644
> index 000000000000..38ad170b2bdd
> --- /dev/null
> +++ b/drivers/usb/cdnsp/gadget.c
> @@ -0,0 +1,1946 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Cadence CDNSP DRD Driver.
> + *
> + * Copyright (C) 2020 Cadence.
> + *
> + * Author: Pawel Laszczak <pawell@xxxxxxxxxxx>
> + *
> + */
> +
> +#include <linux/moduleparam.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/module.h>
> +#include <linux/iopoll.h>
> +#include <linux/delay.h>
> +#include <linux/log2.h>
> +#include <linux/slab.h>
> +#include <linux/pci.h>
> +#include <linux/irq.h>
> +#include <linux/dmi.h>
> +
> +#include "../cdns3/core.h"
> +#include "../cdns3/gadget-export.h"
> +#include "../cdns3/drd.h"
> +#include "gadget.h"
> +
> +unsigned int cdnsp_port_speed(unsigned int port_status)
> +{
> +	/*Detect gadget speed based on PORTSC register*/
> +	if (DEV_SUPERSPEEDPLUS(port_status))
> +		return USB_SPEED_SUPER_PLUS;
> +	else if (DEV_SUPERSPEED(port_status))
> +		return USB_SPEED_SUPER;
> +	else if (DEV_HIGHSPEED(port_status))
> +		return USB_SPEED_HIGH;
> +	else if (DEV_FULLSPEED(port_status))
> +		return USB_SPEED_FULL;
> +
> +	/* If device is detached then speed will be USB_SPEED_UNKNOWN.*/
> +	return USB_SPEED_UNKNOWN;
> +}
> +
> +/*
Use /* or /**?
See doc-guide/kernel-doc.rst
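FWIW, kernel-doc comments start with /** followed by a "name - description"
line; e.g. for cdnsp_find_next_ext_cap() below it would look roughly like
this (just to illustrate the format):

/**
 * cdnsp_find_next_ext_cap - find the offset of an extended capability
 * @base: PCI MMIO registers base address.
 * @start: address at which to start looking (0 or HCC_PARAMS to start at
 *         the beginning of the list).
 * @id: extended capability ID to search for.
 *
 * Returns the offset of the next matching extended capability structure.
 */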
> + * Given a port state, this function returns a value that would result in the
> + * port being in the same state, if the value was written to the port status
> + * control register.
> + * Save Read Only (RO) bits and save read/write bits where
> + * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
> + * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
> + */
> +u32 cdnsp_port_state_to_neutral(u32 state)
> +{
> +	/* Save read-only status and port state. */
> +	return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
> +}
> +
> +/**
> + * Find the offset of the extended capabilities with capability ID id.
> + * @base: PCI MMIO registers base address.
> + * @start: Address at which to start looking, (0 or HCC_PARAMS to start at
> + *         beginning of list)
> + * @id: Extended capability ID to search for.
> + *
> + * Returns the offset of the next matching extended capability structure.
> + * Some capabilities can occur several times,
> + * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
> + */
> +int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
> +{
> +	u32 offset = start;
> +	u32 next;
> +	u32 val;
> +
> +	if (!start || start == HCC_PARAMS_OFFSET) {
> +		val = readl(base + HCC_PARAMS_OFFSET);
> +		if (val == ~0)
> +			return 0;
> +
> +		offset = HCC_EXT_CAPS(val) << 2;
> +		if (!offset)
> +			return 0;
> +	};
> +
> +	do {
> +		val = readl(base + offset);
> +		if (val == ~0)
> +			return 0;
> +
> +		if (EXT_CAPS_ID(val) == id && offset != start)
> +			return offset;
> +
> +		next = EXT_CAPS_NEXT(val);
> +		offset += next << 2;
> +	} while (next);
> +
> +	return 0;
> +}
> +
> +void cdnsp_set_link_state(struct cdnsp_device *pdev,
> +			  __le32 __iomem *port_regs,
> +			  u32 link_state)
> +{
> +	u32 temp;
> +
> +	temp = readl(port_regs);
> +	temp = cdnsp_port_state_to_neutral(temp);
> +	temp |= PORT_WKCONN_E | PORT_WKDISC_E;
> +	writel(temp, port_regs);
> +
> +	temp &= ~PORT_PLS_MASK;
> +	temp |= PORT_LINK_STROBE | link_state;
> +
> +	writel(temp, port_regs);
> +}
> +
> +static void cdnsp_disable_port(struct cdnsp_device *pdev,
> +			       __le32 __iomem *port_regs)
> +{
> +	u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
> +
> +	writel(temp | PORT_PED, port_regs);
> +}
> +
> +static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
> +					__le32 __iomem *port_regs)
> +{
> +	u32 portsc = readl(port_regs);
> +
> +	writel(cdnsp_port_state_to_neutral(portsc) |
> +	       (portsc & PORT_CHANGE_BITS), port_regs);
> +}
> +
> +static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
> +{
> +	__le32 __iomem *reg;
> +	void __iomem *base;
> +	u32 offset = 0;
> +
> +	base = &pdev->cap_regs->hc_capbase;
> +	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
> +	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
> +
> +	bit = readl(reg) | bit;
> +	writel(bit, reg);
> +}
> +
> +static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
> +{
> +	__le32 __iomem *reg;
> +	void __iomem *base;
> +	u32 offset = 0;
> +
> +	base = &pdev->cap_regs->hc_capbase;
> +	offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
> +	reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
> +
> +	bit = readl(reg) & ~bit;
> +	writel(bit, reg);
> +}
> +
> +/*
> + * Disable interrupts and begin the controller halting process.
> + */
> +static void cdnsp_quiesce(struct cdnsp_device *pdev)
> +{
> +	u32 halted;
> +	u32 mask;
> +	u32 cmd;
> +
> +	mask = ~(u32)(CDNSP_IRQS);
> +
> +	halted = readl(&pdev->op_regs->status) & STS_HALT;
> +	if (!halted)
> +		mask &= ~(CMD_R_S | CMD_DEVEN);
> +
> +	cmd = readl(&pdev->op_regs->command);
> +	cmd &= mask;
> +	writel(cmd, &pdev->op_regs->command);
> +}
> +
> +/*
> + * Force controller into halt state.
> + *
> + * Disable any IRQs and clear the run/stop bit.
> + * Controller will complete any current and actively pipelined transactions, and
> + * should halt within 16 ms of the run/stop bit being cleared.
> + * Read controller Halted bit in the status register to see when the
> + * controller is finished.
> + */
> +int cdnsp_halt(struct cdnsp_device *pdev)
> +{
> +	int ret;
> +	u32 val;
> +
> +	cdnsp_quiesce(pdev);
> +
> +	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
> +					val & STS_HALT, 1,
> +					CDNSP_MAX_HALT_USEC);
> +	if (ret) {
> +		dev_err(pdev->dev, "ERROR: Device halt failed\n");
> +		return ret;
> +	}
> +
> +	pdev->cdnsp_state |= CDNSP_STATE_HALTED;
> +
> +	return 0;
> +}
> +
> +/*
> + * device controller died, register read returns 0xffffffff, or command never
> + * ends.
> + */
> +void cdnsp_died(struct cdnsp_device *pdev)
> +{
> +	dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
> +	pdev->cdnsp_state |= CDNSP_STATE_DYING;
> +	cdnsp_halt(pdev);
> +}
> +
> +/*
> + * Set the run bit and wait for the device to be running.
> + */
> +static int cdnsp_start(struct cdnsp_device *pdev)
> +{
> +	u32 temp;
> +	int ret;
> +
> +	temp = readl(&pdev->op_regs->command);
> +	temp |= (CMD_R_S | CMD_DEVEN);
> +	writel(temp, &pdev->op_regs->command);
> +
> +	pdev->cdnsp_state = 0;
> +
> +	/*
> +	 * Wait for the STS_HALT Status bit to be 0 to indicate the device is
> +	 * running.
> +	 */
> +	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
> +					!(temp & STS_HALT), 1,
> +					CDNSP_MAX_HALT_USEC);
> +	if (ret) {
> +		pdev->cdnsp_state = CDNSP_STATE_DYING;
> +		dev_err(pdev->dev, "ERROR: Controller run failed\n");
> +	}
> +
> +	return ret;
> +}
> +
> +/*
> + * Reset a halted controller.
> + *
> + * This resets pipelines, timers, counters, state machines, etc.
> + * Transactions will be terminated immediately, and operational registers
> + * will be set to their defaults.
> + */
> +int cdnsp_reset(struct cdnsp_device *pdev)
> +{
> +	u32 command;
> +	u32 temp;
> +	int ret;
> +
> +	temp = readl(&pdev->op_regs->status);
> +
> +	if (temp == ~(u32)0) {
> +		dev_err(pdev->dev, "Device not accessible, reset failed.\n");
> +		return -ENODEV;
> +	}
> +
> +	if ((temp & STS_HALT) == 0) {
> +		dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
> +		return -EINVAL;
> +	}
> +
> +	command = readl(&pdev->op_regs->command);
> +	command |= CMD_RESET;
> +	writel(command, &pdev->op_regs->command);
> +
> +	ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
> +					!(temp & CMD_RESET), 1,
> +					10 * 1000);
> +	if (ret) {
> +		dev_err(pdev->dev, "ERROR: Controller reset failed\n");
> +		return ret;
> +	}
> +
> +	/*
> +	 * CDNSP cannot write any doorbells or operational registers other
> +	 * than status until the "Controller Not Ready" flag is cleared.
> +	 */
> +	ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
> +					!(temp & STS_CNR), 1,
> +					10 * 1000);
> +
> +	if (ret) {
> +		dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
> +		return ret;
> +	}
> +
> +	dev_info(pdev->dev, "Controller ready to work");
> +
> +	return ret;
> +}
> +
> +/*
> + * cdnsp_get_endpoint_index - Find the index for an endpoint given its
> + * descriptor. Use the return value to right shift 1 for the bitmask.
> + *
> + * Index = (epnum * 2) + direction - 1,
> + * where direction = 0 for OUT, 1 for IN.
> + * For control endpoints, the IN index is used (OUT index is unused), so
> + * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
> + */
> +static unsigned int
> +	cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
> +{
> +	unsigned int index = (unsigned int)usb_endpoint_num(desc);
> +
> +	if (usb_endpoint_xfer_control(desc))
> +		return index * 2;
> +
> +	return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
> +}
> +
> +/*
> + * Find the flag for this endpoint (for use in the control context). Use the
> + * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
> + * bit 1, etc.
> + */
> +static unsigned int
> +	cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
> +{
> +	return 1 << (cdnsp_get_endpoint_index(desc) + 1);
> +}
> +
> +int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
> +{
> +	struct cdnsp_device *pdev = pep->pdev;
> +	struct usb_request *request;
> +	int ret;
> +
> +	if (preq->epnum == 0 && !list_empty(&pep->pending_list))
> +		return -EBUSY;
> +
> +	request = &preq->request;
> +	request->actual = 0;
> +	request->status = -EINPROGRESS;
> +	preq->direction = pep->direction;
> +	preq->epnum = pep->number;
> +	preq->td.drbl = 0;
> +
> +	ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
> +	if (ret)
> +		return ret;
> +
> +	list_add_tail(&preq->list, &pep->pending_list);
> +
> +	switch (usb_endpoint_type(pep->endpoint.desc)) {
> +	case USB_ENDPOINT_XFER_CONTROL:
> +		ret = cdnsp_queue_ctrl_tx(pdev, preq);
> +		break;
> +	case USB_ENDPOINT_XFER_BULK:
> +	case USB_ENDPOINT_XFER_INT:
> +		ret = cdnsp_queue_bulk_tx(pdev, preq);
> +		break;
> +	case USB_ENDPOINT_XFER_ISOC:
> +		ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
> +	}
> +
> +	if (ret)
> +		goto unmap;
> +
> +	return 0;
> +
> +unmap:
> +	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
> +					pep->direction);
> +	list_del(&preq->list);
> +
> +	return ret;
> +}
> +
> +/*
> + * Remove the request's TD from the endpoint ring. This may cause the
> + * controller to stop USB transfers, potentially stopping in the middle of a
> + * TRB buffer. The controller should pick up where it left off in the TD,
> + * unless a Set Transfer Ring Dequeue Pointer is issued.
> + *
> + * The TRBs that make up the buffers for the canceled request will be "removed"
> + * from the ring. Since the ring is a contiguous structure, they can't be
> + * physically removed. Instead, there are two options:
> + *
> + *  1) If the controller is in the middle of processing the request to be
> + *     canceled, we simply move the ring's dequeue pointer past those TRBs
> + *     using the Set Transfer Ring Dequeue Pointer command. This will be
> + *     the common case, when drivers timeout on the last submitted request
> + *     and attempt to cancel.
> + *
> + *  2) If the controller is in the middle of a different TD, we turn the TRBs
> + *     into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
> + *     The controller will need to invalidate any TRBs it has cached after
> + *     the stop endpoint command.
> + *
> + *  3) The TD may have completed by the time the Stop Endpoint Command
> + *     completes, so software needs to handle that case too.
> + *
> + */
> +int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
> +{
> +	struct cdnsp_device *pdev = pep->pdev;
> +	int ret;
> +
> +	if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
> +		ret = cdnsp_cmd_stop_ep(pdev, pep);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	return cdnsp_remove_request(pdev, preq, pep);
> +}
> +
> +static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_input_control_ctx *ctrl_ctx;
> +	struct cdnsp_slot_ctx *slot_ctx;
> +	struct cdnsp_ep_ctx *ep_ctx;
> +	int i;
> +
> +	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
> +
> +	/*
> +	 * When a device's add flag and drop flag are zero, any subsequent
> +	 * configure endpoint command will leave that endpoint's state
> +	 * untouched. Make sure we don't leave any old state in the input
> +	 * endpoint contexts.
> +	 */
> +	ctrl_ctx->drop_flags = 0;
> +	ctrl_ctx->add_flags = 0;
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
> +	slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
> +
> +	/* Endpoint 0 is always valid */
> +	slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
> +	for (i = 1; i < 31; ++i) {
> +		ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
> +		ep_ctx->ep_info = 0;
> +		ep_ctx->ep_info2 = 0;
> +		ep_ctx->deq = 0;
> +		ep_ctx->tx_info = 0;
> +	}
> +}
> +
> +/* Issue a configure endpoint command and wait for it to finish. */
> +static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
> +{
> +	int ret;
> +
> +	cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
> +	cdnsp_ring_cmd_db(pdev);
> +	ret = cdnsp_wait_for_cmd_compl(pdev);
> +	if (ret) {
> +		dev_err(pdev->dev,
> +			"ERR: unexpected command completion code 0x%x.\n", ret);
> +		return -EINVAL;
> +	}
> +
> +	return ret;
> +}
> +
> +static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
> +				       struct cdnsp_ep *pep)
> +{
> +	struct cdnsp_segment *segment;
> +	union cdnsp_trb *event;
> +	u32 cycle_state;
> +	__le32  data;
> +
> +	event = pdev->event_ring->dequeue;
> +	segment = pdev->event_ring->deq_seg;
> +	cycle_state = pdev->event_ring->cycle_state;
> +
> +	while (1) {
> +		data = le32_to_cpu(event->trans_event.flags);
> +
> +		/* Check the owner of the TRB. */
> +		if ((data & TRB_CYCLE) != cycle_state)
> +			break;
> +
> +		if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
> +		    TRB_TO_EP_ID(data) == (pep->idx + 1)) {
> +			data |= TRB_EVENT_INVALIDATE;
> +			event->trans_event.flags = cpu_to_le32(data);
> +		}
> +
> +		if (cdnsp_last_trb_on_seg(segment, event)) {
> +			cycle_state ^= 1;
> +			segment = pdev->event_ring->deq_seg->next;
> +			event = segment->trbs;
> +		} else {
> +			event++;
> +		}
> +	}
> +}
> +
> +int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_segment *event_deq_seg;
> +	dma_addr_t cmd_deq_dma;
> +	union cdnsp_trb *cmd_trb;
> +	union cdnsp_trb *event;
> +	u32 cycle_state;
> +	__le32  flags;
> +	int ret, val;
> +	u64 cmd_dma;
> +
> +	cmd_trb = pdev->cmd.command_trb;
> +	pdev->cmd.status = 0;
> +
> +	ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
> +					!CMD_RING_BUSY(val), 1,
> +					CDNSP_CMD_TIMEOUT);
> +	if (ret) {
> +		dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
> +		pdev->cdnsp_state = CDNSP_STATE_DYING;
> +		return -ETIMEDOUT;
> +	}
> +
> +	event = pdev->event_ring->dequeue;
> +	event_deq_seg = pdev->event_ring->deq_seg;
> +	cycle_state = pdev->event_ring->cycle_state;
> +
> +	cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
> +	if (!cmd_deq_dma)
> +		return -EINVAL;
> +
> +	while (1) {
> +		flags = le32_to_cpu(event->event_cmd.flags);
> +
> +		/* Check the owner of the TRB. */
> +		if ((flags & TRB_CYCLE) != cycle_state)
> +			return -EINVAL;
> +
> +		cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
> +
> +		/*
> +		 * Check whether the completion event is for last queued
> +		 * command.
> +		 */
> +		if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
> +		    cmd_dma != (u64)cmd_deq_dma) {
> +			if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
> +				event++;
> +				continue;
> +			}
> +
> +			if (cdnsp_last_trb_on_ring(pdev->event_ring,
> +						   event_deq_seg, event))
> +				cycle_state ^= 1;
> +
> +			event_deq_seg = event_deq_seg->next;
> +			event = event_deq_seg->trbs;
> +			continue;
> +		}
> +
> +		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
> +		if (pdev->cmd.status == COMP_SUCCESS)
> +			return 0;
> +
> +		return -pdev->cmd.status;
> +	}
> +
> +	return 0;
Maybe the trailing 'return 0;' isn't needed, since there is no break in the
while() loop; it always exits via a return.
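E.g. the tail could just be (illustration only, same code with the
unreachable return dropped):

		pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
		if (pdev->cmd.status == COMP_SUCCESS)
			return 0;

		return -pdev->cmd.status;
	}
}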
> +}
> +
> +int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
> +			struct cdnsp_ep *pep,
> +			int value)
> +{
> +	int ret;
> +
> +	if (value) {
> +		ret = cdnsp_cmd_stop_ep(pdev, pep);
> +		if (ret)
> +			return ret;
> +
> +		if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
> +			cdnsp_queue_halt_endpoint(pdev, pep->idx);
> +			cdnsp_ring_cmd_db(pdev);
> +			ret = cdnsp_wait_for_cmd_compl(pdev);
> +		}
> +
> +		pep->ep_state |= EP_HALTED;
> +	} else {
> +		/*
> +		 * In device mode driver can call reset endpoint command
> +		 * from any endpoint state.
> +		 */
> +		cdnsp_queue_reset_ep(pdev, pep->idx);
> +		cdnsp_ring_cmd_db(pdev);
> +		ret = cdnsp_wait_for_cmd_compl(pdev);
> +		if (ret)
> +			return ret;
> +
> +		pep->ep_state &= ~EP_HALTED;
> +
> +		if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
> +			cdnsp_ring_doorbell_for_active_rings(pdev, pep);
> +
> +		pep->ep_state &= ~EP_WEDGE;
> +	}
> +
> +	return 0;
> +}
> +
> +static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
> +					  struct cdnsp_ep *pep)
> +{
> +	struct cdnsp_input_control_ctx *ctrl_ctx;
> +	struct cdnsp_slot_ctx *slot_ctx;
> +	int ret = 0;
> +	u32 ep_sts;
> +	int i;
> +
> +	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
> +
> +	/* Don't issue the command if there's no endpoints to update. */
> +	if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
> +		return 0;
> +
> +	ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
> +	ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
> +	ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
> +
> +	/* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
> +	for (i = 31; i >= 1; i--) {
> +		__le32 le32 = cpu_to_le32(BIT(i));
> +
> +		if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
> +		    (ctrl_ctx->add_flags & le32) || i == 1) {
> +			slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
> +			slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
> +			break;
> +		}
> +	}
> +
> +	ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
> +
> +	if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
> +	     ep_sts == EP_STATE_DISABLED) ||
> +	    (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
> +		ret = cdnsp_configure_endpoint(pdev);
> +
> +	cdnsp_zero_in_ctx(pdev);
> +
> +	return ret;
> +}
> +
> +/*
> + * This submits a Reset Device Command, which will set the device state to 0,
> + * set the device address to 0, and disable all the endpoints except the default
> + * control endpoint. The USB core should come back and call
> + * cdnsp_setup_device(), and then re-set up the configuration.
> + */
> +int cdnsp_reset_device(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_slot_ctx *slot_ctx;
> +	int slot_state;
> +	int ret, i;
> +
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
> +	slot_ctx->dev_info = 0;
> +	pdev->device_address = 0;
> +
> +	/* If device is not setup, there is no point in resetting it. */
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
> +	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
> +
> +	if (slot_state <= SLOT_STATE_DEFAULT &&
> +	    pdev->eps[0].ep_state & EP_HALTED) {
> +		cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
> +	}
> +
> +	/*
> +	 * During Reset Device command controller shall transition the
> +	 * endpoint ep0 to the Running State.
> +	 */
> +	pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
> +	pdev->eps[0].ep_state |= EP_ENABLED;
> +
> +	if (slot_state <= SLOT_STATE_DEFAULT)
> +		return 0;
> +
> +	cdnsp_queue_reset_device(pdev);
> +	cdnsp_ring_cmd_db(pdev);
> +	ret = cdnsp_wait_for_cmd_compl(pdev);
> +
> +	/*
> +	 * After Reset Device command all not default endpoints
> +	 * are in Disabled state.
> +	 */
> +	for (i = 1; i < 31; ++i)
> +		pdev->eps[i].ep_state |= EP_STOPPED;
> +
> +	if (ret)
> +		dev_err(pdev->dev, "Reset device failed with error code %d",
> +			ret);
> +
> +	return ret;
> +}
> +
> +/*
> + * Sets the MaxPStreams field and the Linear Stream Array field.
> + * Sets the dequeue pointer to the stream context array.
> + */
> +static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
> +					     struct cdnsp_ep_ctx *ep_ctx,
> +					     struct cdnsp_stream_info *stream_info)
> +{
> +	u32 max_primary_streams;
> +
> +	/* MaxPStreams is the number of stream context array entries, not the
> +	 * number we're actually using. Must be in 2^(MaxPstreams + 1) format.
> +	 * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 2, fls(0x100) = 3, etc.
> +	 */
> +	max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
> +	ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
> +	ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
> +				       | EP_HAS_LSA);
> +	ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
> +}
> +
> +/*
> + * The drivers use this function to prepare bulk endpoints to use streams.
> + *
> + * Don't allow the call to succeed if endpoint only supports one stream
> + * (which means it doesn't support streams at all).
> + */
> +int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
> +{
> +	unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
> +	unsigned int num_stream_ctxs;
> +	int ret;
> +
> +	if (num_streams ==  0)
> +		return 0;
> +
> +	if (num_streams > STREAM_NUM_STREAMS)
> +		return -EINVAL;
> +
> +	/*
> +	 * Add two to the number of streams requested to account for
> +	 * stream 0 that is reserved for controller usage and one additional
> +	 * for TASK SET FULL response.
> +	 */
> +	num_streams += 2;
> +
> +	/* The stream context array size must be a power of two */
> +	num_stream_ctxs = roundup_pow_of_two(num_streams);
> +
> +	ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
> +	if (ret)
> +		return ret;
> +
> +	cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);
> +
> +	pep->ep_state |= EP_HAS_STREAMS;
> +	pep->stream_info.td_count = 0;
> +	pep->stream_info.first_prime_det = 0;
> +
> +	/* Subtract 1 for stream 0, which drivers can't use. */
> +	return num_streams - 1;
> +}
> +
> +int cdnsp_disable_slot(struct cdnsp_device *pdev)
> +{
> +	int ret;
> +
> +	cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
> +	cdnsp_ring_cmd_db(pdev);
> +	ret = cdnsp_wait_for_cmd_compl(pdev);
> +
> +	pdev->slot_id = 0;
> +	pdev->active_port = NULL;
> +
> +	memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
> +	memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);
> +
> +	return ret;
> +}
> +
> +int cdnsp_enable_slot(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_slot_ctx *slot_ctx;
> +	int slot_state;
> +	int ret;
> +
> +	/* If device is not setup, there is no point in resetting it */
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
> +	slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
> +
> +	if (slot_state != SLOT_STATE_DISABLED)
> +		return 0;
> +
> +	cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
> +	cdnsp_ring_cmd_db(pdev);
> +	ret = cdnsp_wait_for_cmd_compl(pdev);
> +	if (ret)
> +		return ret;
> +
> +	pdev->slot_id = 1;
> +
> +	return 0;
> +}
> +
> +/*
> + * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY
> + * or with BSR = 1 if set_address is SETUP_CONTEXT_ADDRESS.
> + */
> +int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
> +{
> +	struct cdnsp_input_control_ctx *ctrl_ctx;
> +	struct cdnsp_slot_ctx *slot_ctx;
> +	int dev_state = 0;
> +	int ret;
> +
> +	if (!pdev->slot_id)
> +		return -EINVAL;
> +
> +	if (!pdev->active_port->port_num)
> +		return -EINVAL;
> +
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
> +	dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
> +
> +	if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT)
> +		return 0;
> +
> +	slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
> +	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
> +
> +	if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
> +		ret = cdnsp_setup_addressable_priv_dev(pdev);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);
> +
> +	ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
> +	ctrl_ctx->drop_flags = 0;
> +
> +	cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
> +	cdnsp_ring_cmd_db(pdev);
> +	ret = cdnsp_wait_for_cmd_compl(pdev);
> +
> +	/* Zero the input context control for later use. */
> +	ctrl_ctx->add_flags = 0;
> +	ctrl_ctx->drop_flags = 0;
> +
> +	return ret;
> +}
> +
> +void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
> +				 struct usb_request *req,
> +				 int enable)
> +{
> +	if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
> +		return;
> +
> +	if (enable)
> +		writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
> +		       &pdev->active_port->regs->portpmsc);
> +	else
> +		writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
> +}
> +
> +static int cdnsp_get_frame(struct cdnsp_device *pdev)
> +{
> +	return readl(&pdev->run_regs->microframe_index) >> 3;
> +}
> +
> +static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
> +				  const struct usb_endpoint_descriptor *desc)
> +{
> +	struct cdnsp_input_control_ctx *ctrl_ctx;
> +	struct cdnsp_device *pdev;
> +	struct cdnsp_ep *pep;
> +	unsigned long flags;
> +	u32 added_ctxs;
> +	int ret;
> +
> +	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
> +	    !desc->wMaxPacketSize)
> +		return -EINVAL;
> +
> +	pep = to_cdnsp_ep(ep);
> +	pdev = pep->pdev;
> +
> +	if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
> +			  "%s is already enabled\n", pep->name))
> +		return 0;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +
> +	added_ctxs = cdnsp_get_endpoint_flag(desc);
> +	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
> +		dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
> +		ret = -EINVAL;
> +		goto unlock;
> +	}
> +
> +	pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
> +
> +	if (pdev->gadget.speed == USB_SPEED_FULL) {
> +		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
> +			pep->interval = desc->bInterval << 3;
> +		if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
> +			pep->interval = BIT(desc->bInterval - 1) << 3;
> +	}
> +
> +	if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
> +		if (pep->interval > BIT(12)) {
> +			dev_err(pdev->dev, "bInterval %d not supported\n",
> +				desc->bInterval);
> +			ret = -EINVAL;
> +			goto unlock;
> +		}
> +		cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
> +	}
> +
> +	ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
> +	if (ret)
> +		goto unlock;
> +
> +	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
> +	ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
> +	ctrl_ctx->drop_flags = 0;
> +
> +	ret = cdnsp_update_eps_configuration(pdev, pep);
> +	if (ret) {
> +		cdnsp_free_endpoint_rings(pdev, pep);
> +		goto unlock;
> +	}
> +
> +	pep->ep_state |= EP_ENABLED;
> +	pep->ep_state &= ~EP_STOPPED;
> +
> +unlock:
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return ret;
> +}
> +
> +static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
> +{
> +	struct cdnsp_input_control_ctx *ctrl_ctx;
> +	struct cdnsp_request *preq;
> +	struct cdnsp_device *pdev;
> +	struct cdnsp_ep *pep;
> +	unsigned long flags;
> +	u32 drop_flag;
> +	int ret = 0;
> +
> +	if (!ep)
> +		return -EINVAL;
> +
> +	pep = to_cdnsp_ep(ep);
> +	pdev = pep->pdev;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +
> +	if (!(pep->ep_state & EP_ENABLED)) {
> +		dev_err(pdev->dev, "%s is already disabled\n", pep->name);
> +		ret = -EINVAL;
> +		goto finish;
> +	}
> +
> +	cdnsp_cmd_stop_ep(pdev, pep);
> +	pep->ep_state |= EP_DIS_IN_RROGRESS;
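EP_DIS_IN_RROGRESS (set here, cleared below, and presumably defined in
gadget.h) looks like a typo for EP_DIS_IN_PROGRESS.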
> +	cdnsp_cmd_flush_ep(pdev, pep);
> +
> +	/* Remove all queued USB requests. */
> +	while (!list_empty(&pep->pending_list)) {
> +		preq = next_request(&pep->pending_list);
> +		cdnsp_ep_dequeue(pep, preq);
> +	}
> +
> +	cdnsp_invalidate_ep_events(pdev, pep);
> +
> +	pep->ep_state &= ~EP_DIS_IN_RROGRESS;
> +	drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
> +	ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
> +	ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
> +	ctrl_ctx->add_flags = 0;
> +
> +	cdnsp_endpoint_zero(pdev, pep);
> +
> +	ret = cdnsp_update_eps_configuration(pdev, pep);
> +	cdnsp_free_endpoint_rings(pdev, pep);
> +
> +	pep->ep_state &= ~EP_ENABLED;
> +	pep->ep_state |= EP_STOPPED;
> +
> +finish:
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return ret;
> +}
> +
> +static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
> +							 gfp_t gfp_flags)
> +{
> +	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
> +	struct cdnsp_request *preq;
> +
> +	preq = kzalloc(sizeof(*preq), gfp_flags);
> +	if (!preq)
> +		return NULL;
> +
> +	preq->epnum = pep->number;
> +	preq->pep = pep;
> +
> +	return &preq->request;
> +}
> +
> +static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
> +					 struct usb_request *request)
> +{
> +	struct cdnsp_request *preq = to_cdnsp_request(request);
> +
> +	kfree(preq);
> +}
> +
> +static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
> +				 struct usb_request *request,
> +				 gfp_t gfp_flags)
> +{
> +	struct cdnsp_request *preq;
> +	struct cdnsp_device *pdev;
> +	struct cdnsp_ep *pep;
> +	unsigned long flags;
> +	int ret;
> +
> +	if (!request || !ep)
> +		return -EINVAL;
> +
> +	pep = to_cdnsp_ep(ep);
> +	pdev = pep->pdev;
> +
> +	if (!(pep->ep_state & EP_ENABLED)) {
> +		dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
> +			pep->name);
> +		return -EINVAL;
> +	}
> +
> +	preq = to_cdnsp_request(request);
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	ret = cdnsp_ep_enqueue(pep, preq);
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return ret;
> +}
> +
> +static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
> +				   struct usb_request *request)
> +{
> +	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
> +	struct cdnsp_device *pdev = pep->pdev;
> +	unsigned long flags;
> +	int ret;
> +
> +	if (!pep->endpoint.desc) {
> +		dev_err(pdev->dev,
> +			"%s: can't dequeue to disabled endpoint\n",
> +			pep->name);
> +		return -ESHUTDOWN;
> +	}
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return ret;
> +}
> +
> +static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
> +{
> +	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
> +	struct cdnsp_device *pdev = pep->pdev;
> +	struct cdnsp_request *preq;
> +	unsigned long flags = 0;
> +	int ret;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +
> +	preq = next_request(&pep->pending_list);
> +	if (value) {
> +		if (preq) {
> +			ret = -EAGAIN;
> +			goto done;
> +		}
> +	}
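Nit: the nested ifs above could be folded into a single check; a minimal
sketch of what I mean (same behaviour, just flatter):

	preq = next_request(&pep->pending_list);

	/* Refuse to halt the endpoint while a transfer is still pending. */
	if (value && preq) {
		ret = -EAGAIN;
		goto done;
	}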
> +
> +	ret = cdnsp_halt_endpoint(pdev, pep, value);
> +
> +done:
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +	return ret;
> +}
> +
> +static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
> +{
> +	struct cdnsp_ep *pep = to_cdnsp_ep(ep);
> +	struct cdnsp_device *pdev = pep->pdev;
> +	unsigned long flags = 0;
> +	int ret;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	pep->ep_state |= EP_WEDGE;
> +	ret = cdnsp_halt_endpoint(pdev, pep, 1);
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return ret;
> +}
> +
> +static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
> +	.enable		= cdnsp_gadget_ep_enable,
> +	.disable	= cdnsp_gadget_ep_disable,
> +	.alloc_request	= cdnsp_gadget_ep_alloc_request,
> +	.free_request	= cdnsp_gadget_ep_free_request,
> +	.queue		= cdnsp_gadget_ep_queue,
> +	.dequeue	= cdnsp_gadget_ep_dequeue,
> +	.set_halt	= cdnsp_gadget_ep_set_halt,
> +	.set_wedge	= cdnsp_gadget_ep_set_wedge,
> +};
> +
> +static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
> +	.enable		= cdnsp_gadget_ep_enable,
> +	.disable	= cdnsp_gadget_ep_disable,
> +	.alloc_request	= cdnsp_gadget_ep_alloc_request,
> +	.free_request	= cdnsp_gadget_ep_free_request,
> +	.queue		= cdnsp_gadget_ep_queue,
> +	.dequeue	= cdnsp_gadget_ep_dequeue,
> +	.set_halt	= cdnsp_gadget_ep_set_halt,
> +	.set_wedge	= cdnsp_gadget_ep_set_wedge,
> +};
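cdnsp_gadget_ep0_ops and cdnsp_gadget_ep_ops are identical at this point in
the series. If ep0 is not going to get its own queue/enable handlers in a
later patch, one shared table would be enough, i.e. dropping the ep0 variant
and using

	pep->endpoint.ops = &cdnsp_gadget_ep_ops;

for ep0 as well. If ep0 will diverge later, a short comment saying so would
avoid the question.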
> +
> +void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
> +			   struct cdnsp_request *preq,
> +			   int status)
> +{
> +	struct cdnsp_device *pdev = pep->pdev;
> +
> +	list_del(&preq->list);
> +
> +	if (preq->request.status == -EINPROGRESS)
> +		preq->request.status = status;
> +
> +	usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
> +					preq->direction);
> +
> +	if (preq != &pdev->ep0_preq) {
> +		spin_unlock(&pdev->lock);
> +		usb_gadget_giveback_request(&pep->endpoint, &preq->request);
> +		spin_lock(&pdev->lock);
> +	}
> +}
> +
> +static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
> +	.bLength =		USB_DT_ENDPOINT_SIZE,
> +	.bDescriptorType =	USB_DT_ENDPOINT,
> +	.bmAttributes =		USB_ENDPOINT_XFER_CONTROL,
> +};
> +
> +static int cdnsp_run(struct cdnsp_device *pdev,
> +		     enum usb_device_speed speed)
> +{
> +	u32 fs_speed = 0;
> +	u64 temp_64;
> +	u32 temp;
> +	int ret;
> +
> +	temp_64 = cdnsp_read_64(pdev, &pdev->ir_set->erst_dequeue);
> +	temp_64 &= ~ERST_PTR_MASK;
> +	temp = readl(&pdev->ir_set->irq_control);
> +	temp &= ~IMOD_INTERVAL_MASK;
> +	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
> +	writel(temp, &pdev->ir_set->irq_control);
> +
> +	temp = readl(&pdev->port3x_regs->mode_addr);
> +
> +	switch (speed) {
> +	case USB_SPEED_SUPER_PLUS:
> +		temp |= CFG_3XPORT_SSP_SUPPORT;
> +		break;
> +	case USB_SPEED_SUPER:
> +		temp &= ~CFG_3XPORT_SSP_SUPPORT;
> +		break;
> +	case USB_SPEED_HIGH:
> +		break;
> +	case USB_SPEED_FULL:
> +		fs_speed = PORT_REG6_FORCE_FS;
> +		break;
> +	default:
> +		dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
> +			speed);
> +		fallthrough;
> +	case USB_SPEED_UNKNOWN:
> +		/* Default to superspeed. */
> +		speed = USB_SPEED_SUPER;
> +		break;
> +	}
> +
> +	if (speed >= USB_SPEED_SUPER) {
> +		writel(temp, &pdev->port3x_regs->mode_addr);
> +		cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
> +				     XDEV_RXDETECT);
> +	} else {
> +		cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
> +	}
> +
> +	cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
> +			     XDEV_RXDETECT);
> +
> +	cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
> +
> +	writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);
> +
> +	ret = cdnsp_start(pdev);
> +	if (ret) {
> +		ret = -ENODEV;
> +		goto err;
> +	}
> +
> +	temp = readl(&pdev->op_regs->command);
> +	temp |= (CMD_INTE);
> +	writel(temp, &pdev->op_regs->command);
> +
> +	temp = readl(&pdev->ir_set->irq_pending);
> +	writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);
> +
> +	return 0;
> +err:
> +	cdnsp_halt(pdev);
> +	return ret;
> +}
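In cdnsp_run() temp_64 is read from erst_dequeue and masked, but as far as I
can see the value is never used or written back. If the read is not needed
for ordering, the opening of the function could be trimmed to just the IMOD
setup (and the temp_64 local dropped), something like:

	temp = readl(&pdev->ir_set->irq_control);
	temp &= ~IMOD_INTERVAL_MASK;
	temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
	writel(temp, &pdev->ir_set->irq_control);

If the dummy read is intentional, please add a comment explaining why.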
> +
> +static int cdnsp_gadget_udc_start(struct usb_gadget *g,
> +				  struct usb_gadget_driver *driver)
> +{
> +	enum usb_device_speed max_speed = driver->max_speed;
> +	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
> +	unsigned long flags;
> +	int ret;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	pdev->gadget_driver = driver;
> +
> +	/* limit speed if necessary */
> +	max_speed = min(driver->max_speed, g->max_speed);
> +	ret = cdnsp_run(pdev, max_speed);
> +
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return ret;
> +}
> +
> +/*
> + * Update Event Ring Dequeue Pointer:
> + * - When all events have finished
> + * - To avoid "Event Ring Full Error" condition
> + */
> +void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
> +			       union cdnsp_trb *event_ring_deq,
> +			       u8 clear_ehb)
> +{
> +	u64 temp_64;
> +	dma_addr_t deq;
> +
> +	temp_64 = cdnsp_read_64(pdev, &pdev->ir_set->erst_dequeue);
> +
> +	/* If necessary, update the HW's version of the event ring deq ptr. */
> +	if (event_ring_deq != pdev->event_ring->dequeue) {
> +		deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
> +					    pdev->event_ring->dequeue);
> +		temp_64 &= ERST_PTR_MASK;
> +		temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
> +	}
> +
> +	/* Clear the event handler busy flag (RW1C). */
> +	if (clear_ehb)
> +		temp_64 |= ERST_EHB;
> +	else
> +		temp_64 &= ~ERST_EHB;
> +
> +	cdnsp_write_64(pdev, temp_64, &pdev->ir_set->erst_dequeue);
> +}
> +
> +static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_segment *seg;
> +	u64 val_64;
> +	int i;
> +
> +	cdnsp_initialize_ring_info(pdev->cmd_ring);
> +
> +	seg = pdev->cmd_ring->first_seg;
> +	for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
> +		memset(seg->trbs, 0,
> +		       sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
> +		seg = seg->next;
> +	}
> +
> +	/* Set the address in the Command Ring Control register. */
> +	val_64 = cdnsp_read_64(pdev, &pdev->op_regs->cmd_ring);
> +	val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
> +		 (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
> +		 pdev->cmd_ring->cycle_state;
> +	cdnsp_write_64(pdev, val_64, &pdev->op_regs->cmd_ring);
> +}
> +
> +static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_segment *event_deq_seg;
> +	union cdnsp_trb *event_ring_deq;
> +	union cdnsp_trb *event;
> +	u32 cycle_bit;
> +
> +	event_ring_deq = pdev->event_ring->dequeue;
> +	event_deq_seg = pdev->event_ring->deq_seg;
> +	event = pdev->event_ring->dequeue;
> +
> +	/* Update ring dequeue pointer. */
> +	while (1) {
> +		cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);
> +
> +		/* Does the controller or driver own the TRB? */
> +		if (cycle_bit != pdev->event_ring->cycle_state)
> +			break;
> +
> +		cdnsp_inc_deq(pdev, pdev->event_ring);
> +
> +		if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
> +			event++;
> +			continue;
> +		}
> +
> +		if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
> +					   event))
> +			cycle_bit ^= 1;
> +
> +		event_deq_seg = event_deq_seg->next;
> +		event = event_deq_seg->trbs;
> +	}
> +
> +	cdnsp_update_erst_dequeue(pdev,  event_ring_deq, 1);
> +}
> +
> +static void cdnsp_stop(struct cdnsp_device *pdev)
> +{
> +	u32 temp;
> +
> +	cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);
> +
> +	/* Remove internally queued request for ep0. */
> +	if (!list_empty(&pdev->eps[0].pending_list)) {
> +		struct cdnsp_request *req;
> +
> +		req = next_request(&pdev->eps[0].pending_list);
> +		if (req == &pdev->ep0_preq)
> +			cdnsp_ep_dequeue(&pdev->eps[0], req);
> +	}
> +
> +	cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
> +	cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
> +	cdnsp_disable_slot(pdev);
> +	cdnsp_halt(pdev);
> +
> +	temp = readl(&pdev->op_regs->status);
> +	writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
> +	temp = readl(&pdev->ir_set->irq_pending);
> +	writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);
> +
> +	cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
> +	cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);
> +
> +	/* Clear interrupt line. */
> +	temp = readl(&pdev->ir_set->irq_pending);
> +	temp |= IMAN_IP;
> +	writel(temp, &pdev->ir_set->irq_pending);
> +
> +	cdnsp_consume_all_events(pdev);
> +	cdnsp_clear_cmd_ring(pdev);
> +}
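The bare ~0x1fff in the status write above is hard to follow; a named mask
for the RW1C change bits would help, e.g. (hypothetical name, reuse whatever
gadget.h already defines if such a mask exists):

	/* Hypothetical define covering the RW1C status/change bits 0..12. */
	#define STS_CHANGE_MASK		GENMASK(12, 0)

	temp = readl(&pdev->op_regs->status);
	writel((temp & ~STS_CHANGE_MASK) | STS_EINT, &pdev->op_regs->status);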
> +
> +/*
> + * Stop controller.
> + * This function is called by the gadget core when the driver is removed.
> + * Disable slot, disable IRQs, and quiesce the controller.
> + */
> +static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
> +{
> +	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	cdnsp_stop(pdev);
> +	pdev->gadget_driver = NULL;
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return 0;
> +}
> +
> +static int cdnsp_gadget_get_frame(struct usb_gadget *g)
> +{
> +	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
> +
> +	return cdnsp_get_frame(pdev);
> +}
> +
> +static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_port_regs __iomem *port_regs;
> +	u32 portpm, portsc;
> +
> +	port_regs = pdev->active_port->regs;
> +	portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;
> +
> +	/* Remote wakeup feature is not enabled by host. */
> +	if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
> +		portpm = readl(&port_regs->portpmsc);
> +
> +		if (!(portpm & PORT_RWE))
> +			return;
> +	}
> +
> +	if (portsc == XDEV_U3 && !pdev->may_wakeup)
> +		return;
> +
> +	cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);
> +
> +	pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
> +}
> +
> +static int cdnsp_gadget_wakeup(struct usb_gadget *g)
> +{
> +	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	__cdnsp_gadget_wakeup(pdev);
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return 0;
> +}
> +
> +static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
> +					int is_selfpowered)
> +{
> +	struct cdnsp_device *pdev = gadget_to_cdnsp(g);
> +	unsigned long flags;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	g->is_selfpowered = !!is_selfpowered;
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return 0;
> +}
> +
> +static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
> +{
> +	struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
> +	struct cdns *cdns = dev_get_drvdata(pdev->dev);
> +
> +	if (!is_on) {
> +		cdnsp_reset_device(pdev);
> +		cdns_clear_vbus(cdns);
> +	} else {
> +		cdns_set_vbus(cdns);
> +	}
> +	return 0;
> +}
> +
> +const struct usb_gadget_ops cdnsp_gadget_ops = {
> +	.get_frame		= cdnsp_gadget_get_frame,
> +	.wakeup			= cdnsp_gadget_wakeup,
> +	.set_selfpowered	= cdnsp_gadget_set_selfpowered,
> +	.pullup			= cdnsp_gadget_pullup,
> +	.udc_start		= cdnsp_gadget_udc_start,
> +	.udc_stop		= cdnsp_gadget_udc_stop,
> +};
> +
> +static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
> +				   struct cdnsp_ep *pep)
> +{
> +	void __iomem *reg = &pdev->cap_regs->hc_capbase;
> +	int endpoints;
> +
> +	reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);
> +
> +	if (!pep->direction) {
> +		pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
> +		pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
> +		pep->buffering = (pep->buffering + 1) / 2;
> +		pep->buffering_period = (pep->buffering_period + 1) / 2;
> +		return;
> +	}
> +
> +	endpoints = HCS_ENDPOINTS(readl(&pdev->hcs_params1)) / 2;
> +
> +	/* Move reg to the XBUF_TX_TAG_MASK_0 register. */
> +	reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
> +	/* Move reg to the XBUF_TX_TAG_MASK_N register for this endpoint. */
> +	reg += pep->number * sizeof(u32) * 2;
> +
> +	pep->buffering = (readl(reg) + 1) / 2;
> +	pep->buffering_period = pep->buffering;
> +}
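The register offset arithmetic above took me a while to follow. Since each
TX tag-mask entry is a pair of u32s, a named stride would make the intent
clearer, e.g. (illustrative only, the define name is made up):

	#define XBUF_TX_TAG_MASK_STRIDE		(2 * sizeof(u32))

	/* Skip the TX command block and point at XBUF_TX_TAG_MASK_0. */
	reg += XBUF_TX_CMD_OFFSET + (endpoints + 1) * XBUF_TX_TAG_MASK_STRIDE;
	/* Advance to the XBUF_TX_TAG_MASK_N pair for this endpoint. */
	reg += pep->number * XBUF_TX_TAG_MASK_STRIDE;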
> +
> +static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
> +{
> +	int max_streams = HCC_MAX_PSA(pdev->hcc_params);
> +	struct cdnsp_ep *pep;
> +	int i;
> +
> +	INIT_LIST_HEAD(&pdev->gadget.ep_list);
> +
> +	if (max_streams < STREAM_LOG_STREAMS) {
> +		dev_err(pdev->dev, "Stream size %d not supported\n",
> +			max_streams);
> +		return -EINVAL;
> +	}
> +
> +	max_streams = STREAM_LOG_STREAMS;
> +
> +	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
> +		bool direction = !(i & 1); /* Start from OUT endpoint. */
> +		u8 epnum = ((i + 1) >> 1);
> +
> +		if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
> +			continue;
> +
> +		pep = &pdev->eps[i];
> +		pep->pdev = pdev;
> +		pep->number = epnum;
> +		pep->direction = direction; /* 0 for OUT, 1 for IN. */
> +
> +		/*
> +		 * Ep0 is bidirectional, so ep0in and ep0out are represented by
> +		 * pdev->eps[0]
> +		 */
> +		if (epnum == 0) {
> +			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
> +				 epnum, "BiDir");
> +
> +			pep->idx = 0;
> +			usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
> +			pep->endpoint.maxburst = 1;
> +			pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
> +			pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
> +			pep->endpoint.comp_desc = NULL;
> +			pep->endpoint.caps.type_control = true;
> +			pep->endpoint.caps.dir_in = true;
> +			pep->endpoint.caps.dir_out = true;
> +
> +			pdev->ep0_preq.epnum = pep->number;
> +			pdev->ep0_preq.pep = pep;
> +			pdev->gadget.ep0 = &pep->endpoint;
> +		} else {
> +			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
> +				 epnum, (pep->direction) ? "in"  : "out");
There are two blank spaces after "in"; please run checkpatch.pl with
--strict.
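i.e.:

			snprintf(pep->name, sizeof(pep->name), "ep%d%s",
				 epnum, (pep->direction) ? "in" : "out");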
> +
> +			pep->idx =  (epnum * 2 + (direction ? 1 : 0)) - 1;
> +			usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
> +
> +			pep->endpoint.max_streams = max_streams;
> +			pep->endpoint.ops = &cdnsp_gadget_ep_ops;
> +			list_add_tail(&pep->endpoint.ep_list,
> +				      &pdev->gadget.ep_list);
> +
> +			pep->endpoint.caps.type_iso = true;
> +			pep->endpoint.caps.type_bulk = true;
> +			pep->endpoint.caps.type_int = true;
> +
> +			pep->endpoint.caps.dir_in = direction;
> +			pep->endpoint.caps.dir_out = !direction;
> +		}
> +
> +		pep->endpoint.name = pep->name;
> +		pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
> +		pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
> +		cdnsp_get_ep_buffering(pdev, pep);
> +
> +		dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
> +			"CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
> +			"SupDir IN: %s, OUT: %s\n",
> +			pep->name, 1024,
> +			(pep->endpoint.caps.type_control) ? "yes" : "no",
> +			(pep->endpoint.caps.type_int) ? "yes" : "no",
> +			(pep->endpoint.caps.type_bulk) ? "yes" : "no",
> +			(pep->endpoint.caps.type_iso) ? "yes" : "no",
> +			(pep->endpoint.caps.dir_in) ? "yes" : "no",
> +			(pep->endpoint.caps.dir_out) ? "yes" : "no");
> +
> +		INIT_LIST_HEAD(&pep->pending_list);
> +	}
> +
> +	return 0;
> +}
> +
> +static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_ep *pep;
> +	int i;
> +
> +	for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
> +		pep = &pdev->eps[i];
> +		if (pep->number != 0 && pep->out_ctx)
> +			list_del(&pep->endpoint.ep_list);
> +	}
> +}
> +
> +void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
> +{
> +	pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;
> +
> +	if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
> +		spin_unlock(&pdev->lock);
> +		pdev->gadget_driver->disconnect(&pdev->gadget);
> +		spin_lock(&pdev->lock);
> +	}
> +
> +	pdev->gadget.speed = USB_SPEED_UNKNOWN;
> +	usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
> +
> +	pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
> +}
> +
> +void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
> +{
> +	if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
> +		spin_unlock(&pdev->lock);
> +		pdev->gadget_driver->suspend(&pdev->gadget);
> +		spin_lock(&pdev->lock);
> +	}
> +}
> +
> +void cdnsp_resume_gadget(struct cdnsp_device *pdev)
> +{
> +	if (pdev->gadget_driver && pdev->gadget_driver->resume) {
> +		spin_unlock(&pdev->lock);
> +		pdev->gadget_driver->resume(&pdev->gadget);
> +		spin_lock(&pdev->lock);
> +	}
> +}
> +
> +void cdnsp_irq_reset(struct cdnsp_device *pdev)
> +{
> +	struct cdnsp_port_regs __iomem *port_regs;
> +
> +	cdnsp_reset_device(pdev);
> +
> +	port_regs = pdev->active_port->regs;
> +	pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));
> +
> +	spin_unlock(&pdev->lock);
> +	usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
> +	spin_lock(&pdev->lock);
> +
> +	switch (pdev->gadget.speed) {
> +	case USB_SPEED_SUPER_PLUS:
> +	case USB_SPEED_SUPER:
> +		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
> +		pdev->gadget.ep0->maxpacket = 512;
> +		break;
> +	case USB_SPEED_HIGH:
> +	case USB_SPEED_FULL:
> +		cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
> +		pdev->gadget.ep0->maxpacket = 64;
> +		break;
> +	default:
> +		/* Low speed is not supported. */
> +		dev_err(pdev->dev, "Unknown device speed\n");
> +		break;
> +	}
> +
> +	cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
> +	cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
> +	usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
> +}
> +
> +static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
> +{
> +	void __iomem *reg = &pdev->cap_regs->hc_capbase;
> +	struct cdnsp_rev_cap *rev_cap;
> +
> +	reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
> +	rev_cap = reg;
> +
> +	pdev->rev_cap.ctrl_revision = readl(&rev_cap->ctrl_revision);
> +	pdev->rev_cap.rtl_revision = readl(&rev_cap->rtl_revision);
> +	pdev->rev_cap.ep_supported = readl(&rev_cap->ep_supported);
> +	pdev->rev_cap.ext_cap = readl(&rev_cap->ext_cap);
> +	pdev->rev_cap.rx_buff_size = readl(&rev_cap->rx_buff_size);
> +	pdev->rev_cap.tx_buff_size = readl(&rev_cap->tx_buff_size);
> +
> +	dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
> +		 pdev->rev_cap.ctrl_revision, pdev->rev_cap.rtl_revision,
> +		 pdev->rev_cap.ep_supported, pdev->rev_cap.rx_buff_size,
> +		 pdev->rev_cap.tx_buff_size);
> +}
> +
> +static int cdnsp_gen_setup(struct cdnsp_device *pdev)
> +{
> +	int ret;
> +
> +	pdev->cap_regs = pdev->regs;
> +	pdev->op_regs = pdev->regs +
> +		HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
> +	pdev->run_regs = pdev->regs +
> +		(readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);
> +
> +	/* Cache read-only capability registers */
> +	pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
> +	pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
> +	pdev->hci_version = HC_VERSION(pdev->hcc_params);
> +	pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
> +
> +	cdnsp_get_rev_cap(pdev);
> +
> +	/* Make sure the Device Controller is halted. */
> +	ret = cdnsp_halt(pdev);
> +	if (ret)
> +		return ret;
> +
> +	/* Reset the internal controller memory state and registers. */
> +	ret = cdnsp_reset(pdev);
> +	if (ret)
> +		return ret;
> +
> +	/*
> +	 * Set dma_mask and coherent_dma_mask to 64-bits,
> +	 * if controller supports 64-bit addressing.
> +	 */
> +	if (HCC_64BIT_ADDR(pdev->hcc_params) &&
> +	    !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
> +		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
> +		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
> +	} else {
> +		/*
> +		 * This is to avoid error in cases where a 32-bit USB
> +		 * controller is used on a 64-bit capable system.
> +		 */
> +		ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
> +		if (ret)
> +			return ret;
> +		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
> +		dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
> +	}
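Not a blocker, but this could also be written with
dma_set_mask_and_coherent(), which sets both masks in one call; a possible
shape (untested sketch):

	if (!HCC_64BIT_ADDR(pdev->hcc_params) ||
	    dma_set_mask_and_coherent(pdev->dev, DMA_BIT_MASK(64))) {
		/* Fall back to 32-bit DMA addressing. */
		ret = dma_set_mask_and_coherent(pdev->dev, DMA_BIT_MASK(32));
		if (ret)
			return ret;
		dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
	} else {
		dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
	}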
> +
> +	spin_lock_init(&pdev->lock);
> +
> +	ret = cdnsp_mem_init(pdev, GFP_KERNEL);
> +	if (ret)
> +		return ret;
> +
> +	return 0;
> +}
> +
> +static int __cdnsp_gadget_init(struct cdns *cdns)
> +{
> +	struct cdnsp_device *pdev;
> +	u32 max_speed;
> +	int ret = -ENOMEM;
> +
> +	cdns_drd_gadget_on(cdns);
> +
> +	pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
> +	if (!pdev)
> +		return -ENOMEM;
> +
> +	pm_runtime_get_sync(cdns->dev);
> +
> +	cdns->gadget_dev = pdev;
> +	pdev->dev = cdns->dev;
> +	pdev->regs = cdns->dev_regs;
> +	max_speed = usb_get_maximum_speed(cdns->dev);
> +
> +	switch (max_speed) {
> +	case USB_SPEED_FULL:
> +	case USB_SPEED_HIGH:
> +	case USB_SPEED_SUPER:
> +	case USB_SPEED_SUPER_PLUS:
> +		break;
> +	default:
> +		dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
> +		fallthrough;
> +	case USB_SPEED_UNKNOWN:
> +		/* Default to SSP */
> +		max_speed = USB_SPEED_SUPER_PLUS;
> +		break;
> +	}
> +
> +	pdev->gadget.ops = &cdnsp_gadget_ops;
> +	pdev->gadget.name = "cdnsp-gadget";
> +	pdev->gadget.speed = USB_SPEED_UNKNOWN;
> +	pdev->gadget.sg_supported = 1;
> +	pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
> +	pdev->gadget.lpm_capable = 1;
> +
> +	pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
> +	if (!pdev->setup_buf)
> +		goto free_pdev;
> +
> +	/*
> +	 * The controller supports unaligned buffers, but aligned buffers
> +	 * should improve performance.
> +	 */
> +	pdev->gadget.quirk_ep_out_aligned_size = true;
> +
> +	ret = cdnsp_gen_setup(pdev);
> +	if (ret) {
> +		dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
> +		goto free_setup;
> +	}
> +
> +	ret = cdnsp_gadget_init_endpoints(pdev);
> +	if (ret) {
> +		dev_err(pdev->dev, "failed to initialize endpoints\n");
> +		goto halt_pdev;
> +	}
> +
> +	ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
> +	if (ret) {
> +		dev_err(pdev->dev, "failed to register udc\n");
> +		goto free_endpoints;
> +	}
> +
> +	ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
> +					cdnsp_irq_handler,
> +					cdnsp_thread_irq_handler, IRQF_SHARED,
> +					dev_name(pdev->dev), pdev);
> +	if (ret)
> +		goto del_gadget;
> +
> +	return 0;
> +
> +del_gadget:
> +	usb_del_gadget_udc(&pdev->gadget);
> +free_endpoints:
> +	cdnsp_gadget_free_endpoints(pdev);
> +halt_pdev:
> +	cdnsp_halt(pdev);
> +	cdnsp_reset(pdev);
> +	cdnsp_mem_cleanup(pdev);
> +free_setup:
> +	kfree(pdev->setup_buf);
> +free_pdev:
> +	kfree(pdev);
> +
> +	return ret;
> +}
> +
> +static void cdnsp_gadget_exit(struct cdns *cdns)
> +{
> +	struct cdnsp_device *pdev = cdns->gadget_dev;
> +
> +	devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
> +	pm_runtime_mark_last_busy(cdns->dev);
> +	pm_runtime_put_autosuspend(cdns->dev);
> +	usb_del_gadget_udc(&pdev->gadget);
> +	cdnsp_gadget_free_endpoints(pdev);
> +	cdnsp_mem_cleanup(pdev);
> +	kfree(pdev);
> +	cdns->gadget_dev = NULL;
> +	cdns_drd_gadget_off(cdns);
> +}
> +
> +static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
> +{
> +	struct cdnsp_device *pdev = cdns->gadget_dev;
> +	unsigned long flags;
> +
> +	if (pdev->link_state == XDEV_U3)
> +		return 0;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	cdnsp_disconnect_gadget(pdev);
> +	cdnsp_stop(pdev);
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return 0;
> +}
> +
> +static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
> +{
> +	struct cdnsp_device *pdev = cdns->gadget_dev;
> +	enum usb_device_speed max_speed;
> +	unsigned long flags;
> +	int ret;
> +
> +	if (!pdev->gadget_driver)
> +		return 0;
> +
> +	spin_lock_irqsave(&pdev->lock, flags);
> +	max_speed = pdev->gadget_driver->max_speed;
> +
> +	/* Limit speed if necessary. */
> +	max_speed = min(max_speed, pdev->gadget.max_speed);
> +
> +	ret = cdnsp_run(pdev, max_speed);
> +
> +	if (pdev->link_state == XDEV_U3)
> +		__cdnsp_gadget_wakeup(pdev);
> +
> +	spin_unlock_irqrestore(&pdev->lock, flags);
> +
> +	return ret;
> +}
> +
> +/**
> + * cdnsp_gadget_init - initialize device structure
> + * @cdns: cdnsp instance
> + *
> + * This function initializes the gadget.
> + */
> +int cdnsp_gadget_init(struct cdns *cdns)
> +{
> +	struct cdns_role_driver *rdrv;
> +
> +	rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
> +	if (!rdrv)
> +		return -ENOMEM;
> +
> +	rdrv->start	= __cdnsp_gadget_init;
> +	rdrv->stop	= cdnsp_gadget_exit;
> +	rdrv->suspend	= cdnsp_gadget_suspend;
> +	rdrv->resume	= cdnsp_gadget_resume;
> +	rdrv->state	= CDNS_ROLE_STATE_INACTIVE;
> +	rdrv->name	= "gadget";
> +	cdns->roles[USB_ROLE_DEVICE] = rdrv;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(cdnsp_gadget_init);
> diff --git a/drivers/usb/cdnsp/gadget.h b/drivers/usb/cdnsp/gadget.h
> index bfc4196c3b10..547516681fbe 100644
> --- a/drivers/usb/cdnsp/gadget.h
> +++ b/drivers/usb/cdnsp/gadget.h
> @@ -1456,4 +1456,143 @@ struct cdnsp_device {
>  	u16 test_mode;
>  };
>  



