[PATCH 4/4 v5] xHCI: PCI power management implementation

From 9ed8f6d0ecdfdbc983fdcae9fa8e21c3094cc354 Mon Sep 17 00:00:00 2001
From: Andiry Xu <andiry.xu@xxxxxxx>
Date: Mon, 26 Jul 2010 15:34:28 +0800
Subject: [PATCH 4/4] xHCI: PCI power management implementation

This patch implements PCI suspend/resume.

Please refer to the xHCI specification for details of the suspend/resume
operation.

For S3, CSS/SRS in USBCMD is used to save/restore the internal state.
However, an error may occur while restoring the internal state.
In that case, the HC internal state is wrong and an HC reset is needed.

Signed-off-by: Libin Yang <libin.yang@xxxxxxx>
Signed-off-by: Dong Nguyen <dong.nguyen@xxxxxxx>
Signed-off-by: Andiry Xu <andiry.xu@xxxxxxx>
---
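
Note for reviewers: the S3 path boils down to the save/restore handshake
sketched below (a minimal sketch only -- the real code is in xhci_suspend()
and xhci_resume() further down, and uses the driver's existing xhci_readl(),
xhci_writel() and handshake() helpers):

	/* Suspend: halt the HC, then ask it to save its internal state. */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;		/* clear Run/Stop */
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_HALT, STS_HALT, 100*100))
		return -ETIMEDOUT;	/* HC never halted */

	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;		/* Controller Save State */
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100))
		return -ETIMEDOUT;	/* save never completed */

On resume, CMD_CRS starts the restore and STS_RESTORE is polled the same
way; if STS_SRE is set afterwards (or we resumed from hibernation) the
saved state is unusable, so the HC is reset and re-initialized instead.
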
 drivers/usb/host/xhci-mem.c |   75 +++++++++++++++
 drivers/usb/host/xhci-pci.c |   34 +++++++-
 drivers/usb/host/xhci.c     |  212 +++++++++++++++++++++++++++++++++++++++++++
 drivers/usb/host/xhci.h     |   19 ++++-
 4 files changed, 338 insertions(+), 2 deletions(-)

diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 57128b1..aecad80 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -199,6 +199,81 @@ fail:
 	return NULL;
 }
 
+/*
+ * initialize command ring
+ */
+void xhci_cmd_ring_init(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	u64	val_64;
+	struct xhci_segment	*seg = ring->first_seg;
+
+	/* reset cmd_ring trbs */
+	memset(seg->trbs, 0, sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+	/* reset enqueue and dequeue pointer */
+	xhci_link_segments(xhci, seg, seg->next, 1);
+	xhci_initialize_ring_info(ring);
+	INIT_LIST_HEAD(&ring->td_list);
+
+	/* set hw cmd_ring register to match the internal cmd ring buffer */
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+			  (seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
+			  ring->cycle_state;
+
+	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+			 (unsigned long long) val_64);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+}
+
+/*
+ * initialize event ring
+ */
+void xhci_event_ring_init(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+	unsigned int val;
+	u64	val_64;
+	struct xhci_segment	*seg = ring->first_seg;
+
+	do {
+		memset(seg->trbs, 0,
+				sizeof(union xhci_trb)*TRBS_PER_SEGMENT);
+		xhci_link_segments(xhci, seg, seg->next, 0);
+		seg = seg->next;
+	} while (seg != ring->first_seg);
+	xhci_initialize_ring_info(ring);
+	INIT_LIST_HEAD(&ring->td_list);
+
+	memset(xhci->erst.entries, 0,
+			sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS);
+	xhci->erst.num_entries = ERST_NUM_SEGS;
+
+	/* set ring base address and size for each segment table entry */
+	for (val = 0, seg = ring->first_seg; val < ERST_NUM_SEGS; val++) {
+		struct xhci_erst_entry *entry = &xhci->erst.entries[val];
+		entry->seg_addr = seg->dma;
+		entry->seg_size = TRBS_PER_SEGMENT;
+		entry->rsvd = 0;
+		seg = seg->next;
+	}
+
+	/* set ERST count with the number of entries in the segment table */
+	val = xhci_readl(xhci, &xhci->ir_set->erst_size);
+	val &= ERST_SIZE_MASK;
+	val |= ERST_NUM_SEGS;
+	xhci_writel(xhci, val, &xhci->ir_set->erst_size);
+
+	/* set the segment table base address */
+	xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n",
+		(unsigned long long)xhci->erst.erst_dma_addr);
+	val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	val_64 &= ERST_PTR_MASK;
+	val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
+	xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
+
+	/* Set the event ring dequeue address */
+	xhci_set_hc_event_deq(xhci);
+}
+
 void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
 		struct xhci_virt_device *virt_dev,
 		unsigned int ep_index)
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b0e6120..12bdc29 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -107,6 +107,30 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
 	return xhci_pci_reinit(xhci, pdev);
 }
 
+#ifdef CONFIG_PM
+static int xhci_pci_suspend(struct usb_hcd *hcd)
+{
+	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	int	retval = 0;
+
+	if (hcd->state != HC_STATE_SUSPENDED)
+		return -EINVAL;
+
+	retval = xhci_suspend(xhci);
+
+	return retval;
+}
+
+static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
+{
+	struct xhci_hcd		*xhci = hcd_to_xhci(hcd);
+	int			retval = 0;
+
+	retval = xhci_resume(xhci, hibernated);
+	return retval;
+}
+#endif /* CONFIG_PM */
+
 static const struct hc_driver xhci_pci_hc_driver = {
 	.description =		hcd_name,
 	.product_desc =		"xHCI Host Controller",
@@ -123,7 +147,10 @@ static const struct hc_driver xhci_pci_hc_driver = {
 	 */
 	.reset =		xhci_pci_setup,
 	.start =		xhci_run,
-	/* suspend and resume implemented later */
+#ifdef CONFIG_PM
+	.pci_suspend =          xhci_pci_suspend,
+	.pci_resume =           xhci_pci_resume,
+#endif
 	.stop =			xhci_stop,
 	.shutdown =		xhci_shutdown,
 
@@ -179,6 +206,11 @@ static struct pci_driver xhci_pci_driver = {
 	/* suspend and resume implemented later */
 
 	.shutdown = 	usb_hcd_pci_shutdown,
+#ifdef CONFIG_PM_SLEEP
+	.driver = {
+		.pm = &usb_hcd_pci_pm_ops
+	},
+#endif
 };
 
 int xhci_register_pci(void)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 93c020d..b3cb815 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -20,6 +20,7 @@
  * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#include <linux/pci.h>
 #include <linux/irq.h>
 #include <linux/log2.h>
 #include <linux/module.h>
@@ -570,6 +571,217 @@ void xhci_shutdown(struct usb_hcd *hcd)
 		    xhci_readl(xhci, &xhci->op_regs->status));
 }
 
+static void xhci_save_registers(struct xhci_hcd *xhci)
+{
+	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
+	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
+	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
+	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
+	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
+	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
+	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+}
+
+static void xhci_restore_registers(struct xhci_hcd *xhci)
+{
+	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
+	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
+	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
+	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
+	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
+	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+}
+
+/*
+ * Stop HC (not bus-specific)
+ *
+ * This is called when the machine transitions into S3/S4 mode.
+ *
+ */
+int xhci_suspend(struct xhci_hcd *xhci)
+{
+	int			rc = 0;
+	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+	u32			command;
+
+	spin_lock_irq(&xhci->lock);
+	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	/* step 1: stop endpoint */
+	/* skipped assuming that port suspend has already been done */
+
+	/* step 2: clear Run/Stop bit */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command &= ~CMD_RUN;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	if (handshake(xhci, &xhci->op_regs->status,
+		      STS_HALT, STS_HALT, 100*100)) {
+		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
+		spin_unlock_irq(&xhci->lock);
+		return -ETIMEDOUT;
+	}
+
+	/* step 3: save registers */
+	xhci_save_registers(xhci);
+
+	/* step 4: set CSS flag */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command |= CMD_CSS;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
+		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
+		spin_unlock_irq(&xhci->lock);
+		return -ETIMEDOUT;
+	}
+	/* step 5: remove core well power */
+	spin_unlock_irq(&xhci->lock);
+
+	return rc;
+}
+
+/*
+ * start xHC (not bus-specific)
+ *
+ * This is called when the machine transitions out of S3/S4 mode.
+ *
+ */
+int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
+{
+	u32			command, temp = 0;
+	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
+	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
+	u64	val_64;
+	int	old_state, retval;
+	unsigned int	val, val2;
+	int	i;
+
+	old_state = hcd->state;
+	if (time_before(jiffies, xhci->next_statechange))
+		msleep(100);
+
+	spin_lock_irq(&xhci->lock);
+
+	if (!hibernated) {
+		/* step 1: restore register */
+		xhci_restore_registers(xhci);
+		xhci_set_hc_event_deq(xhci);
+		/* step 2: initialize command ring buffer */
+		val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+		val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+			 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+					       xhci->cmd_ring->dequeue) &
+			 (u64) ~CMD_RING_RSVD_BITS) |
+			 xhci->cmd_ring->cycle_state;
+		xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+				(unsigned long long) val_64);
+		xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+		/* step 3: restore state and start state */
+		/* set the CRS flag to start the restore */
+		command = xhci_readl(xhci, &xhci->op_regs->command);
+		command |= CMD_CRS;
+		xhci_writel(xhci, command, &xhci->op_regs->command);
+		if (handshake(xhci, &xhci->op_regs->status,
+			      STS_RESTORE, 0, 10*100)) {
+			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
+			spin_unlock_irq(&xhci->lock);
+			return -ETIMEDOUT;
+		}
+		temp = xhci_readl(xhci, &xhci->op_regs->status);
+	}
+
+	/* If restore operation fails, reset the HC during resume */
+	if ((temp & STS_SRE) || hibernated) {
+		usb_root_hub_lost_power(hcd->self.root_hub);
+
+		retval = xhci_halt(xhci);
+		if (retval) {
+			spin_unlock_irq(&xhci->lock);
+			return retval;
+		}
+
+		xhci_dbg(xhci, "Resetting HCD\n");
+		/* Reset the internal HC memory state and registers. */
+		retval = xhci_reset(xhci);
+		if (retval) {
+			spin_unlock_irq(&xhci->lock);
+			return retval;
+		}
+
+		xhci_dbg(xhci, "Reset complete\n");
+
+		/*
+		 * Initialize HCD and host controller data structures.
+		 */
+		val = HCS_MAX_SLOTS(xhci_readl(xhci,
+					       &xhci->cap_regs->hcs_params1));
+		val2 = xhci_readl(xhci, &xhci->op_regs->config_reg);
+		val |= (val2 & ~HCS_SLOTS_MASK);
+		xhci_writel(xhci, val, &xhci->op_regs->config_reg);
+
+		xhci_write_64(xhci, xhci->dcbaa->dma,
+			      &xhci->op_regs->dcbaa_ptr);
+
+		xhci_cmd_ring_init(xhci, xhci->cmd_ring);
+
+		xhci_event_ring_init(xhci, xhci->event_ring);
+
+		for (i = 0; i < MAX_HC_PORTS; ++i)
+			xhci->resume_done[i] = 0;
+
+		for (i = 1; i < MAX_HC_SLOTS; ++i) {
+			if (xhci->devs[i])
+				xhci_free_virt_device(xhci, i);
+		}
+
+		if (!pci_set_mwi(pdev))
+			xhci_dbg(xhci, "MWI active\n");
+
+		hcd->uses_new_polling = 1;
+		hcd->poll_rh = 0;
+
+		temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
+		temp &= ~ER_IRQ_INTERVAL_MASK;
+		temp |= (u32) 160;
+		xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
+
+		temp = xhci_readl(xhci, &xhci->op_regs->command);
+		temp |= (CMD_EIE);
+		xhci_writel(xhci, temp, &xhci->op_regs->command);
+
+		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+		xhci_writel(xhci, ER_IRQ_ENABLE(temp),
+				&xhci->ir_set->irq_pending);
+	}
+
+	/* step 4: set Run/Stop bit */
+	command = xhci_readl(xhci, &xhci->op_regs->command);
+	command |= CMD_RUN;
+	xhci_writel(xhci, command, &xhci->op_regs->command);
+	handshake(xhci, &xhci->op_regs->status, STS_HALT,
+		  0, 250 * 1000);
+
+	/* step 5: walk topology and initialize portsc,
+	 * portpmsc and portli
+	 */
+	/* this is done in bus_resume */
+
+	/* step 6: restart each of the previously
+	 * Running endpoints by ringing their doorbells
+	 */
+
+	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+	if (!hibernated)
+		hcd->state = old_state;
+	else
+		hcd->state = HC_STATE_SUSPENDED;
+
+	spin_unlock_irq(&xhci->lock);
+	return 0;
+}
+
 /*-------------------------------------------------------------------------*/
 
 /**
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 79497a2..070709a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -31,6 +31,7 @@
 /* Code sharing between pci-quirks and xhci hcd */
 #include	"xhci-ext-caps.h"
 
+#define HCD_FLAG_HW_ACCESSIBLE	0x00000001
 /* xHCI PCI Configuration Registers */
 #define XHCI_SBRN_OFFSET	(0x60)
 
@@ -191,7 +192,7 @@ struct xhci_op_regs {
 /* bits 4:6 are reserved (and should be preserved on writes). */
 /* light reset (port status stays unchanged) - reset completed when this is 0 */
 #define CMD_LRESET	(1 << 7)
-/* FIXME: ignoring host controller save/restore state for now. */
+/* host controller save/restore state. */
 #define CMD_CSS		(1 << 8)
 #define CMD_CRS		(1 << 9)
 /* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
@@ -1110,6 +1111,17 @@ struct xhci_scratchpad {
 #define XHCI_STOP_EP_CMD_TIMEOUT	5
 /* XXX: Make these module parameters */
 
+struct s3_save {
+	u32	command;
+	u32	dev_nt;
+	u64	dcbaa_ptr;
+	u32	config_reg;
+	u32	irq_pending;
+	u32	irq_control;
+	u32	erst_size;
+	u64	erst_base;
+	u64	erst_dequeue;
+};
 
 /* There is one ehci_hci structure per controller */
 struct xhci_hcd {
@@ -1178,6 +1190,7 @@ struct xhci_hcd {
 	unsigned long		next_statechange;
 
 	u32			command;
+	struct s3_save		s3;
 /* Host controller is dying - not responding to commands. "I'm not dead yet!"
  *
  * xHC interrupts have been disabled and a watchdog timer will (or has already)
@@ -1310,6 +1323,8 @@ void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
 /* xHCI memory management */
 void xhci_mem_cleanup(struct xhci_hcd *xhci);
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
+void xhci_cmd_ring_init(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_event_ring_init(struct xhci_hcd *xhci, struct xhci_ring *ring);
 void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
 int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
 int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
@@ -1377,6 +1392,8 @@ int xhci_init(struct usb_hcd *hcd);
 int xhci_run(struct usb_hcd *hcd);
 void xhci_stop(struct usb_hcd *hcd);
 void xhci_shutdown(struct usb_hcd *hcd);
+int xhci_suspend(struct xhci_hcd *xhci);
+int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
 int xhci_get_frame(struct usb_hcd *hcd);
 irqreturn_t xhci_irq(struct usb_hcd *hcd);
 int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
-- 
1.7.0.4


