[PATCH v4 09/12] PCI/PM: Check if pcie_capability_read_*() reads ~0

On failure, pcie_capability_read_*() sets its last parameter, val,
to 0. However, with Patch 12/12 it is possible for val to be set
to ~0 on failure. This would introduce a bug in callers that only
test individual bits, because (~0 & x) == x for any mask x, so a
failed read would be indistinguishable from a successful read with
the tested bits set.

Since ~0 is an invalid value here, add an extra check for ~0 to
distinguish success from failure.

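For illustration only (not part of this patch; PCI_EXP_DEVCAP_FLR is
borrowed from pci_regs.h purely as an example flag), a tiny userspace
program shows why a plain bit test cannot tell a failed all-ones read
from a genuinely set flag:

#include <stdint.h>
#include <stdio.h>

#define PCI_EXP_DEVCAP_FLR	0x10000000	/* Function Level Reset, from pci_regs.h */

int main(void)
{
	uint32_t cap = ~0;	/* what a failed read would hand back after Patch 12/12 */

	if (cap & PCI_EXP_DEVCAP_FLR)	/* passes even though the read failed */
		printf("FLR looks supported\n");

	if (cap == (uint32_t)~0)	/* the kind of ~0 check this patch adds */
		printf("read failure detected\n");

	return 0;
}
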
pci_enable_atomic_ops_to_root():
On a failed DEVCAP2 read, treat AtomicOp routing/completion as
unsupported and return -EINVAL. On a failed DEVCTL2 read, do not
treat egress blocking as enabled and continue up the device
hierarchy.

pcie_wait_for_link_delay():
Add an extra check for ~0 to the condition for breaking out of
the loop. Delay only on success; otherwise report an error and
return false.

pcie_bandwidth_available():
On read failure, move up the device hierarchy and continue.

pcie_get_speed_cap():
On read failure, report an error and return PCI_SPEED_UNKNOWN.

Suggested-by: Bjorn Helgaas <bjorn@xxxxxxxxxxx>
Signed-off-by: Saheed O. Bolarinwa <refactormyself@xxxxxxxxx>
---
 drivers/pci/pci.c | 34 ++++++++++++++++++++++++----------
 1 file changed, 24 insertions(+), 10 deletions(-)
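
Every hunk below open-codes the same "read, then reject all-ones"
pattern. A hypothetical helper along these lines (the name and the
-ENODEV choice are illustrative; only pcie_capability_read_dword()
itself is the existing interface) captures the idea:

#include <linux/pci.h>

/*
 * Hypothetical wrapper, for illustration only: return 0 on success,
 * the error from the capability read if the access failed, or
 * -ENODEV when the read returned all-ones.
 */
static int pcie_cap_read_dword_checked(struct pci_dev *dev, int pos, u32 *val)
{
	int ret = pcie_capability_read_dword(dev, pos, val);

	if (ret)
		return ret;
	if (*val == (u32)~0)
		return -ENODEV;

	return 0;
}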

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index c9338f914a0e..1dd3659f1388 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3216,7 +3216,7 @@ void pci_configure_ari(struct pci_dev *dev)
 		return;
 
 	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
-	if (!(cap & PCI_EXP_DEVCAP2_ARI))
+	if ((cap == (u32)~0) || !(cap & PCI_EXP_DEVCAP2_ARI))
 		return;
 
 	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
@@ -3635,13 +3635,13 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
 		/* Ensure switch ports support AtomicOp routing */
 		case PCI_EXP_TYPE_UPSTREAM:
 		case PCI_EXP_TYPE_DOWNSTREAM:
-			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
+			if ((cap == (u32)~0) || !(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
 				return -EINVAL;
 			break;
 
 		/* Ensure root port supports all the sizes we care about */
 		case PCI_EXP_TYPE_ROOT_PORT:
-			if ((cap & cap_mask) != cap_mask)
+			if ((cap == (u32)~0) || ((cap & cap_mask) != cap_mask))
 				return -EINVAL;
 			break;
 		}
@@ -3650,7 +3650,7 @@ int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
 		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
 			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
 						   &ctl2);
-			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
+			if ((ctl2 != (u32)~0) && (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
 				return -EINVAL;
 		}
 
@@ -4512,7 +4512,7 @@ bool pcie_has_flr(struct pci_dev *dev)
 		return false;
 
 	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
-	return cap & PCI_EXP_DEVCAP_FLR;
+	return ((cap != (u32)~0) && (cap & PCI_EXP_DEVCAP_FLR));
 }
 EXPORT_SYMBOL_GPL(pcie_has_flr);
 
@@ -4672,19 +4672,19 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
 	for (;;) {
 		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
 		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
-		if (ret == active)
+		if ((lnk_status != (u16)~0) && (ret == active))
 			break;
 		if (timeout <= 0)
 			break;
 		msleep(10);
 		timeout -= 10;
 	}
-	if (active && ret)
+	if ((lnk_status != (u16)~0) && active && ret)
 		msleep(delay);
-	else if (ret != active)
+	else if ((lnk_status == (u16)~0) || (ret != active))
 		pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
 			active ? "set" : "cleared");
-	return ret == active;
+	return ((lnk_status != (u16)~0) && (ret == active));
 }
 
 /**
@@ -5773,6 +5773,11 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
 	while (dev) {
 		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
 
+		if (lnksta == (u16)~0) {
+			dev = pci_upstream_bridge(dev);
+			continue;
+		}
+
 		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
 		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
 			PCI_EXP_LNKSTA_NLW_SHIFT;
@@ -5819,12 +5824,21 @@ enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
 	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
 	 */
 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
+	if (lnkcap2 == (u32)~0) {
+		pci_err(dev, "failed to read link speed capability\n");
+		return PCI_SPEED_UNKNOWN;
+	}
 
 	/* PCIe r3.0-compliant */
 	if (lnkcap2)
 		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
 
 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
+	if (lnkcap == (u32)~0) {
+		pci_err(dev, "failed to read link speed capability\n");
+		return PCI_SPEED_UNKNOWN;
+	}
+
 	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
 		return PCIE_SPEED_5_0GT;
 	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
@@ -5846,7 +5860,7 @@ enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
 	u32 lnkcap;
 
 	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
-	if (lnkcap)
+	if (lnkcap && (lnkcap != (u32)~0))
 		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
 
 	return PCIE_LNK_WIDTH_UNKNOWN;
-- 
2.18.4
