- qla3xxx-nic-driver-updates-2.patch removed from -mm tree

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

 



The patch titled

     Qlogic qla3xxx driver v2.02.00-k36 for upstream inclusion.

has been removed from the -mm tree.  Its filename is

     qla3xxx-nic-driver-updates-2.patch

This patch was dropped because it was folded into qla3xxx-NIC-driver.patch

------------------------------------------------------
Subject: Qlogic qla3xxx driver v2.02.00-k36 for upstream inclusion.
From: "Ron Mercer" <ron.mercer@xxxxxxxxxx>

-Removed potential infinite loop in ql_sem_spinlock().
-Relaxed hardware locking granularity.
-Fixed irq_request() where shared flag was used in MSI environment.
-Removed queue containing TX control blocks. This resource has a one to one
  correspondence to each entry in the TX queue.
-Removed unnecessary tx_lock.
-Changed version to v2.02.00-k36.

The above changes plus the changes from the k35 patch address Jeff Garzik's
 concerns from his response. His response can be reviewed at this URL:

http://marc.theaimsgroup.com/?l=linux-netdev&m=115101855424635&w=2

This driver has been through several iterations on the netdev list and we
feel this driver is ready for inclusion in the upstream kernel.

It has been built and tested on x86 and PPC64 platforms.

Use this URL to view the entire driver.

ftp://ftp.qlogic.com/outgoing/linux/network/upstream/2.02.00k36/qla3xxx-fullpatch-v2.02.00-k36.txt

Signed-off-by: Ron Mercer <ron.mercer@xxxxxxxxxx>
Cc: Jeff Garzik <jeff@xxxxxxxxxx>
Cc: Stephen Hemminger <shemminger@xxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 drivers/net/qla3xxx.c |  567 ++++++++++++++++++++++------------------
 drivers/net/qla3xxx.h |   37 --
 2 files changed, 328 insertions(+), 276 deletions(-)

diff -puN drivers/net/qla3xxx.c~qla3xxx-nic-driver-updates-2 drivers/net/qla3xxx.c
--- a/drivers/net/qla3xxx.c~qla3xxx-nic-driver-updates-2
+++ a/drivers/net/qla3xxx.c
@@ -38,7 +38,7 @@
 
 #define DRV_NAME  	"qla3xxx"
 #define DRV_STRING 	"QLogic ISP3XXX Network Driver"
-#define DRV_VERSION	"v2.02.00-k35"
+#define DRV_VERSION	"v2.02.00-k36"
 #define PFX		DRV_NAME " "
 
 static const char ql3xxx_driver_name[] = DRV_NAME;
@@ -69,49 +69,47 @@ static struct pci_device_id ql3xxx_pci_t
 
 MODULE_DEVICE_TABLE(pci, ql3xxx_pci_tbl);
 
-static void ql_sem_spinlock(struct ql3_adapter *qdev,
+/*
+ * Caller must take hw_lock.
+ */
+static int ql_sem_spinlock(struct ql3_adapter *qdev,
 			    u32 sem_mask, u32 sem_bits)
 {
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	u32 value;
-	unsigned long hw_flags;
+	unsigned int seconds = 3;
 
-	while (1) {
-		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	do {
 		writel((sem_mask | sem_bits),
 		       &port_regs->CommonRegs.semaphoreReg);
 		value = readl(&port_regs->CommonRegs.semaphoreReg);
-		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		if ((value & (sem_mask >> 16)) == sem_bits)
-			break;
-		cpu_relax();
-	}
+			return 0;
+		ssleep(1);
+	} while(--seconds);
+	return -1;
 }
 
 static void ql_sem_unlock(struct ql3_adapter *qdev, u32 sem_mask)
 {
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
-	unsigned long hw_flags;
-
-	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	writel(sem_mask, &port_regs->CommonRegs.semaphoreReg);
 	readl(&port_regs->CommonRegs.semaphoreReg);
-	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 }
 
 static int ql_sem_lock(struct ql3_adapter *qdev, u32 sem_mask, u32 sem_bits)
 {
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	u32 value;
-	unsigned long hw_flags;
 
-	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	writel((sem_mask | sem_bits), &port_regs->CommonRegs.semaphoreReg);
 	value = readl(&port_regs->CommonRegs.semaphoreReg);
-	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return ((value & (sem_mask >> 16)) == sem_bits);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
 {
 	int i = 0;
@@ -149,7 +147,7 @@ static void ql_set_register_page(struct 
 	qdev->current_page = page;
 }
 
-static u32 ql_read_common_reg(struct ql3_adapter *qdev,
+static u32 ql_read_common_reg_l(struct ql3_adapter *qdev,
 			      u32 __iomem * reg)
 {
 	u32 value;
@@ -162,7 +160,13 @@ static u32 ql_read_common_reg(struct ql3
 	return value;
 }
 
-static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+static u32 ql_read_common_reg(struct ql3_adapter *qdev,
+			      u32 __iomem * reg)
+{
+	return readl(reg);
+}
+
+static u32 ql_read_page0_reg_l(struct ql3_adapter *qdev, u32 __iomem *reg)
 {
 	u32 value;
 	unsigned long hw_flags;
@@ -177,7 +181,14 @@ static u32 ql_read_page0_reg(struct ql3_
 	return value;
 }
 
-static void ql_write_common_reg(struct ql3_adapter *qdev,
+static u32 ql_read_page0_reg(struct ql3_adapter *qdev, u32 __iomem *reg)
+{
+	if (qdev->current_page != 0)
+		ql_set_register_page(qdev,0);
+	return readl(reg);
+}
+
+static void ql_write_common_reg_l(struct ql3_adapter *qdev,
 				u32 * reg, u32 value)
 {
 	unsigned long hw_flags;
@@ -189,51 +200,47 @@ static void ql_write_common_reg(struct q
 	return;
 }
 
+static void ql_write_common_reg(struct ql3_adapter *qdev,
+				u32 * reg, u32 value)
+{
+	writel(value, (u32 *) reg);
+	readl(reg);
+	return;
+}
+
 static void ql_write_page0_reg(struct ql3_adapter *qdev,
 			       u32 * reg, u32 value)
 {
-	unsigned long hw_flags;
-
-	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-
 	if (qdev->current_page != 0)
 		ql_set_register_page(qdev,0);
 	writel(value, (u32 *) reg);
 	readl(reg);
-
-	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return;
 }
 
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
 static void ql_write_page1_reg(struct ql3_adapter *qdev,
 			       u32 * reg, u32 value)
 {
-	unsigned long hw_flags;
-
-	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-
 	if (qdev->current_page != 1)
 		ql_set_register_page(qdev,1);
 	writel(value, (u32 *) reg);
 	readl(reg);
-
-	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return;
 }
 
+/*
+ * Caller holds hw_lock. Only called during init.
+ */
 static void ql_write_page2_reg(struct ql3_adapter *qdev,
 			       u32 * reg, u32 value)
 {
-	unsigned long hw_flags;
-
-	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
-
 	if (qdev->current_page != 2)
 		ql_set_register_page(qdev,2);
 	writel(value, (u32 *) reg);
 	readl(reg);
-
-	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return;
 }
 
@@ -241,7 +248,7 @@ static void ql_disable_interrupts(struct
 {
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 			    (ISP_IMR_ENABLE_INT << 16));
 
 }
@@ -250,7 +257,7 @@ static void ql_enable_interrupts(struct 
 {
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
-	ql_write_common_reg(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
+	ql_write_common_reg_l(qdev, &port_regs->CommonRegs.ispInterruptMaskReg,
 			    ((0xff << 16) | ISP_IMR_ENABLE_INT));
 
 }
@@ -320,10 +327,14 @@ static void fm93c56a_deselect(struct ql3
 static void eeprom_readword(struct ql3_adapter *qdev, u32 eepromAddr,
 			    unsigned short *value);
 
+/*
+ * Caller holds hw_lock.
+ */
 static void fm93c56a_select(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
 	    		qdev->mem_map_registers;
+
 	qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1;
 	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
 			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
@@ -331,6 +342,9 @@ static void fm93c56a_select(struct ql3_a
 			    ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data));
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void fm93c56a_cmd(struct ql3_adapter *qdev, u32 cmd, u32 eepromAddr)
 {
 	int i;
@@ -421,6 +435,9 @@ static void fm93c56a_cmd(struct ql3_adap
 	}
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void fm93c56a_deselect(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -430,6 +447,9 @@ static void fm93c56a_deselect(struct ql3
 			    ISP_NVRAM_MASK | qdev->eeprom_cmd_data);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void fm93c56a_datain(struct ql3_adapter *qdev, unsigned short *value)
 {
 	int i;
@@ -461,6 +481,9 @@ static void fm93c56a_datain(struct ql3_a
 	*value = (u16) data;
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void eeprom_readword(struct ql3_adapter *qdev,
 			    u32 eepromAddr, unsigned short *value)
 {
@@ -491,12 +514,21 @@ static int ql_get_nvram_params(struct ql
 	u16 *pEEPROMData;
 	u16 checksum = 0;
 	u32 index;
+	unsigned long hw_flags;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
 	pEEPROMData = (u16 *) & qdev->nvram_data;
 	qdev->eeprom_cmd_data = 0;
-	ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
+	if(ql_sem_spinlock(qdev, QL_NVRAM_SEM_MASK,
 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 10);
+			 2) << 10)) {
+		printk(KERN_ERR PFX"%s: Failed ql_sem_spinlock().\n",
+			__func__);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+		return -1;
+	}
+
 	for (index = 0; index < EEPROM_SIZE; index++) {
 		eeprom_readword(qdev, index, pEEPROMData);
 		checksum += *pEEPROMData;
@@ -505,8 +537,9 @@ static int ql_get_nvram_params(struct ql
 	ql_sem_unlock(qdev, QL_NVRAM_SEM_MASK);
 
 	if (checksum != 0) {
-		printk(KERN_DEBUG PFX "%s: checksum should be zero, is %x!!\n",
+		printk(KERN_ERR PFX "%s: checksum should be zero, is %x!!\n",
 		       qdev->ndev->name, checksum);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 		return -1;
 	}
 
@@ -522,6 +555,7 @@ static int ql_get_nvram_params(struct ql
 	pEEPROMData = (u16 *) & qdev->nvram_data.version;
 	*pEEPROMData = le16_to_cpu(*pEEPROMData);
 
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return checksum;
 }
 
@@ -610,10 +644,6 @@ static int ql_mii_write_reg_ex(struct ql
 	    		qdev->mem_map_registers;
 	u8 scanWasEnabled;
 
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
-
 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
@@ -622,7 +652,6 @@ static int ql_mii_write_reg_ex(struct ql
 			       "%s Timed out waiting for management port to "
 			       "get free before issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
@@ -638,14 +667,12 @@ static int ql_mii_write_reg_ex(struct ql
 			       "%s: Timed out waiting for management port to"
 			       "get free before issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
 	if (scanWasEnabled)
 		ql_mii_enable_scan_mode(qdev);
 
-	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 	return 0;
 }
 
@@ -657,10 +684,6 @@ static int ql_mii_read_reg_ex(struct ql3
 	u8 scanWasEnabled;
 	u32 temp;
 
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
-
 	scanWasEnabled = ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
@@ -669,7 +692,6 @@ static int ql_mii_read_reg_ex(struct ql3
 			       "%s: Timed out waiting for management port to "
 			       "get free before issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
@@ -689,7 +711,6 @@ static int ql_mii_read_reg_ex(struct ql3
 			       "%s: Timed out waiting for management port to "
 			       "get free after issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
@@ -699,8 +720,6 @@ static int ql_mii_read_reg_ex(struct ql3
 	if (scanWasEnabled)
 		ql_mii_enable_scan_mode(qdev);
 
-	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
-
 	return 0;
 }
 
@@ -709,10 +728,6 @@ static int ql_mii_write_reg(struct ql3_a
 	struct ql3xxx_port_registers __iomem *port_regs =
 	    		qdev->mem_map_registers;
 
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
-
 	ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
@@ -721,7 +736,6 @@ static int ql_mii_write_reg(struct ql3_a
 			       "%s: Timed out waiting for management port to "
 			       "get free before issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
@@ -730,20 +744,18 @@ static int ql_mii_write_reg(struct ql3_a
 
 	ql_write_page0_reg(qdev, &port_regs->macMIIMgmtDataReg, value);
 
-	/* Wait for write to complete 9/10/04 SJP */
+	/* Wait for write to complete. */
 	if (ql_wait_for_mii_ready(qdev)) {
 		if (netif_msg_link(qdev))
 			printk(KERN_WARNING PFX
 			       "%s: Timed out waiting for management port to "
 			       "get free before issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
 	ql_mii_enable_scan_mode(qdev);
 
-	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 	return 0;
 }
 
@@ -753,10 +765,6 @@ static int ql_mii_read_reg(struct ql3_ad
 	struct ql3xxx_port_registers __iomem *port_regs =
 	    		qdev->mem_map_registers;
 
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
-
 	ql_mii_disable_scan_mode(qdev);
 
 	if (ql_wait_for_mii_ready(qdev)) {
@@ -765,7 +773,6 @@ static int ql_mii_read_reg(struct ql3_ad
 			       "%s: Timed out waiting for management port to "
 			       "get free before issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
@@ -785,7 +792,6 @@ static int ql_mii_read_reg(struct ql3_ad
 			       "%s: Timed out waiting for management port to "
 			       "get free before issuing command.\n",
 			       qdev->ndev->name);
-		ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 		return -1;
 	}
 
@@ -794,7 +800,6 @@ static int ql_mii_read_reg(struct ql3_ad
 
 	ql_mii_enable_scan_mode(qdev);
 
-	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 	return 0;
 }
 
@@ -906,6 +911,9 @@ static int ql_is_phy_neg_pause(struct ql
 	return (reg & PHY_NEG_PAUSE) != 0;
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void ql_mac_enable(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -923,6 +931,9 @@ static void ql_mac_enable(struct ql3_ada
 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void ql_mac_cfg_soft_reset(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -940,6 +951,9 @@ static void ql_mac_cfg_soft_reset(struct
 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void ql_mac_cfg_gig(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -957,6 +971,9 @@ static void ql_mac_cfg_gig(struct ql3_ad
 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void ql_mac_cfg_full_dup(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -974,6 +991,9 @@ static void ql_mac_cfg_full_dup(struct q
 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void ql_mac_cfg_pause(struct ql3_adapter *qdev, u32 enable)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -993,6 +1013,9 @@ static void ql_mac_cfg_pause(struct ql3_
 		ql_write_page0_reg(qdev, &port_regs->mac0ConfigReg, value);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static int ql_is_fiber(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -1020,6 +1043,9 @@ static int ql_is_auto_cfg(struct ql3_ada
 	return (reg & 0x1000) != 0;
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static int ql_is_auto_neg_complete(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -1098,6 +1124,9 @@ static int ql_is_link_full_dup(struct ql
 		return ql_is_full_dup(qdev);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static int ql_link_down_detect(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -1119,6 +1148,9 @@ static int ql_link_down_detect(struct ql
 	return (temp & bitToCheck) != 0;
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static int ql_link_down_detect_clear(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -1146,6 +1178,9 @@ static int ql_link_down_detect_clear(str
 	return 0;
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static int ql_this_adapter_controls_port(struct ql3_adapter *qdev,
 					 u32 mac_index)
 {
@@ -1202,6 +1237,9 @@ static void ql_phy_init_ex(struct ql3_ad
 	ql_phy_start_neg_ex(qdev, mac_index);
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static u32 ql_get_link_state(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs =
@@ -1228,8 +1266,13 @@ static u32 ql_get_link_state(struct ql3_
 	}
 	return linkState;
 }
+
 static int ql_port_start(struct ql3_adapter *qdev)
 {
+	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			 2) << 7))
+		return -1;
 
 	if (ql_is_fiber(qdev)) {
 		ql_petbi_init(qdev);
@@ -1238,11 +1281,18 @@ static int ql_port_start(struct ql3_adap
 		ql_phy_init_ex(qdev, qdev->mac_index);
 	}
 
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 	return 0;
 }
 
-static void ql_finish_auto_neg(struct ql3_adapter *qdev)
+static int ql_finish_auto_neg(struct ql3_adapter *qdev)
 {
+
+	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			 2) << 7))
+		return -1;
+
 	if (!ql_auto_neg_error(qdev)) {
 		if (test_bit(QL_LINK_MASTER,&qdev->flags)) {
 			/* configure the MAC */
@@ -1269,7 +1319,7 @@ static void ql_finish_auto_neg(struct ql
 				printk(KERN_DEBUG PFX
 				       "%s: Enabling mac.\n",
 				       qdev->ndev->
-				       name);
+					       name);
 			ql_mac_enable(qdev, 1);
 		}
 
@@ -1297,14 +1347,27 @@ static void ql_finish_auto_neg(struct ql
 				       "Calling ql_port_start().\n",
 				       qdev->ndev->
 				       name);
-			ql_port_start(qdev);	/* Restart port */
+			/*
+			 * ql_port_start() is shared code and needs
+			 * to lock the PHY on it's own.
+			 */
+			ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+			if(ql_port_start(qdev))	{/* Restart port */
+				return -1;
+			} else
+				return 0;
 		}
 	}
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	return 0;
 }
 
 static void ql_link_state_machine(struct ql3_adapter *qdev)
 {
 	u32 curr_link_state;
+	unsigned long hw_flags;
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
 	curr_link_state = ql_get_link_state(qdev);
 
@@ -1356,31 +1419,27 @@ static void ql_link_state_machine(struct
 		}
 		break;
 	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 }
 
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
 static void ql_get_phy_owner(struct ql3_adapter *qdev)
 {
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
-
 	if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
 		set_bit(QL_LINK_MASTER,&qdev->flags);
 	else
 		clear_bit(QL_LINK_MASTER,&qdev->flags);
-	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
 }
 
+/*
+ * Caller must take hw_lock and QL_PHY_GIO_SEM.
+ */
 static void ql_init_scan_mode(struct ql3_adapter *qdev)
 {
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
-
 	ql_mii_enable_scan_mode(qdev);
 
-	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
-
 	if (test_bit(QL_LINK_OPTICAL,&qdev->flags)) {
 		if (ql_this_adapter_controls_port(qdev, qdev->mac_index))
 			ql_petbi_init_ex(qdev, qdev->mac_index);
@@ -1396,15 +1455,16 @@ static void ql_init_scan_mode(struct ql3
  * we had a way to disable MDC until after the PHY is out of reset, but we
  * don't have that capability.
  */
-static void ql_mii_setup(struct ql3_adapter *qdev)
+static int ql_mii_setup(struct ql3_adapter *qdev)
 {
 	u32 reg;
 	struct ql3xxx_port_registers __iomem *port_regs =
 	    		qdev->mem_map_registers;
 
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
 			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
+			 2) << 7))
+		return -1;
 
 	/* Divide 125MHz clock by 28 to meet PHY timing requirements */
 	reg = MAC_MII_CONTROL_CLK_SEL_DIV28;
@@ -1413,6 +1473,7 @@ static void ql_mii_setup(struct ql3_adap
 			   reg | ((MAC_MII_CONTROL_CLK_SEL_MASK) << 16));
 
 	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	return 0;
 }
 
 static u32 ql_supported_modes(struct ql3_adapter *qdev)
@@ -1435,6 +1496,52 @@ static u32 ql_supported_modes(struct ql3
 	return supported;
 }
 
+static int ql_get_auto_cfg_status(struct ql3_adapter *qdev)
+{
+	int status;
+	unsigned long hw_flags;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			 2) << 7))
+		return 0;
+	status = ql_is_auto_cfg(qdev);
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return status;
+}
+
+static u32 ql_get_speed(struct ql3_adapter *qdev)
+{
+	u32 status;
+	unsigned long hw_flags;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			 2) << 7))
+		return 0;
+	status = ql_get_link_speed(qdev);
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return status;
+}
+
+static int ql_get_full_dup(struct ql3_adapter *qdev)
+{
+	int status;
+	unsigned long hw_flags;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+		(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			 2) << 7))
+		return 0;
+	status = ql_is_link_full_dup(qdev);
+	ql_sem_unlock(qdev, QL_PHY_GIO_SEM_MASK);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return status;
+}
+
+
 static int ql_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
 {
 	struct ql3_adapter *qdev = netdev_priv(ndev);
@@ -1449,9 +1556,9 @@ static int ql_get_settings(struct net_de
 		ecmd->phy_address = qdev->PHYAddr;
 	}
 	ecmd->advertising = ql_supported_modes(qdev);
-	ecmd->autoneg = ql_is_auto_cfg(qdev);
-	ecmd->speed = ql_get_link_speed(qdev);
-	ecmd->duplex = ql_is_link_full_dup(qdev);
+	ecmd->autoneg = ql_get_auto_cfg_status(qdev);
+	ecmd->speed = ql_get_speed(qdev);
+	ecmd->duplex = ql_get_full_dup(qdev);
 	return 0;
 }
 
@@ -1490,34 +1597,6 @@ static struct ethtool_ops ql3xxx_ethtool
 	.set_msglevel = ql_set_msglevel,
 };
 
-static struct ql_tx_buf_cb *ql_alloc_txbuf(struct ql3_adapter *qdev)
-{
-	struct ql_tx_buf_cb *tx_buf_ptr = NULL;
-	unsigned long flags;
-
-	spin_lock_irqsave(&qdev->tx_cb_lock, flags);
-	tx_buf_ptr = qdev->tx_free_list;
-	if (tx_buf_ptr != NULL) {
-		/* Remove the buffer from freelist */
-		qdev->tx_free_list = tx_buf_ptr->next;
-		tx_buf_ptr->next = NULL;
-		qdev->tx_free_count--;
-	}
-	spin_unlock_irqrestore(&qdev->tx_cb_lock, flags);
-	return tx_buf_ptr;
-}
-
-static void ql_free_txbuf(struct ql3_adapter *qdev,
-			  struct ql_tx_buf_cb *tx_buf_ptr)
-{
-	spin_lock(&qdev->tx_cb_lock);
-	tx_buf_ptr->skb = NULL;
-	tx_buf_ptr->next = qdev->tx_free_list;
-	qdev->tx_free_list = tx_buf_ptr;
-	qdev->tx_free_count++;
-	spin_unlock(&qdev->tx_cb_lock);
-}
-
 static int ql_populate_free_queue(struct ql3_adapter *qdev)
 {
 	struct ql_rcv_buf_cb *lrg_buf_cb = qdev->lrg_buf_free_head;
@@ -1560,6 +1639,9 @@ static int ql_populate_free_queue(struct
 	return 0;
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static void ql_update_lrg_bufq_prod_index(struct ql3_adapter *qdev)
 {
 	struct bufq_addr_element *lrg_buf_q_ele;
@@ -1614,27 +1696,17 @@ static void ql_update_lrg_bufq_prod_inde
 static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 				   struct ob_mac_iocb_rsp *mac_rsp)
 {
-	u32 tid;
-	struct ql_tx_buf_cb *tx_buf_ptr;
-
-	if ((tid = mac_rsp->transaction_id) > (XMIT_CB_CNT - 1)) {
-		printk(KERN_ERR PFX
-		       "%s: BAD tid=0x%x!!! Stopping TX Queue.\n",
-		       qdev->ndev->name, tid);
-		qdev->stats.tx_errors++;
-		netif_stop_queue(qdev->ndev);
-		return;
-	}
+	struct ql_tx_buf_cb *tx_cb;
 
-	tx_buf_ptr = &qdev->tx_buf[tid];
+	tx_cb = &qdev->tx_buf[mac_rsp->transaction_id];
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(tx_buf_ptr, mapaddr),
-			 pci_unmap_len(tx_buf_ptr, maplen), PCI_DMA_TODEVICE);
-	dev_kfree_skb_irq(tx_buf_ptr->skb);
-	tx_buf_ptr->skb = NULL;
+			 pci_unmap_addr(tx_cb, mapaddr),
+			 pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
+	dev_kfree_skb_irq(tx_cb->skb);
 	qdev->stats.tx_packets++;
-	qdev->stats.tx_bytes += cpu_to_le16(tx_buf_ptr->u.mac_iocb.data_len);
-	ql_free_txbuf(qdev, tx_buf_ptr);
+	qdev->stats.tx_bytes += tx_cb->skb->len;
+	tx_cb->skb = NULL;
+	atomic_inc(&qdev->tx_count);
 }
 
 static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
@@ -1782,6 +1854,7 @@ static int ql_tx_rx_clean(struct ql3_ada
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	struct net_rsp_iocb *net_rsp;
 	struct net_device *ndev = qdev->ndev;
+	unsigned long hw_flags;
 
 	/* While there are entries in the completion queue. */
 	while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
@@ -1836,6 +1909,8 @@ static int ql_tx_rx_clean(struct ql3_ada
 		}
 	}
 
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
 	ql_update_lrg_bufq_prod_index(qdev);
 
 	if (qdev->small_buf_release_cnt >= 16) {
@@ -1857,16 +1932,12 @@ static int ql_tx_rx_clean(struct ql3_ada
 	ql_write_common_reg(qdev,
 			    (u32 *) & port_regs->CommonRegs.rspQConsumerIndex,
 			    qdev->rsp_consumer_index);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
 	if (unlikely(netif_queue_stopped(qdev->ndev))) {
-		spin_lock(&qdev->tx_cb_lock);
 		if (netif_queue_stopped(qdev->ndev) &&
-		    (qdev->tx_free_count > (NUM_REQ_Q_ENTRIES / 4))) {
-			printk(KERN_DEBUG PFX "%s: waking queue.\n",
-			       qdev->ndev->name);
+		    (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
 			netif_wake_queue(qdev->ndev);
-		}
-		spin_unlock(&qdev->tx_cb_lock);
 	}
 
 	return *tx_cleaned + *rx_cleaned;
@@ -1907,7 +1978,7 @@ static irqreturn_t ql3xxx_isr(int irq, v
 	port_regs = qdev->mem_map_registers;
 
 	value =
-	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
 
 	if (value & (ISP_CONTROL_FE | ISP_CONTROL_RI)) {
 		spin_lock(&qdev->adapter_lock);
@@ -1922,7 +1993,7 @@ static irqreturn_t ql3xxx_isr(int irq, v
 			 * Chip Fatal Error.
 			 */
 			var =
-			    ql_read_page0_reg(qdev,
+			    ql_read_page0_reg_l(qdev,
 					      &port_regs->PortFatalErrStatus);
 			printk(KERN_WARNING PFX
 			       "%s: Resetting chip. PortFatalErrStatus "
@@ -1956,46 +2027,36 @@ static int ql3xxx_send(struct sk_buff *s
 {
 	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
-	struct ql_tx_buf_cb *tx_buf_ptr;
+	struct ql_tx_buf_cb *tx_cb;
 	struct ob_mac_iocb_req *mac_iocb_ptr;
 	u64 map;
 
-	if (unlikely((tx_buf_ptr = ql_alloc_txbuf(qdev)) == NULL)) {
-		if (!netif_queue_stopped(ndev)) {
+	if (unlikely(atomic_read(&qdev->tx_count) < 2)) {
+		if (!netif_queue_stopped(ndev))
 			netif_stop_queue(ndev);
-			if (qdev->tx_count % 10)
-				printk(KERN_DEBUG PFX
-				       "\r%s: No TX resources, stop queue.",
-				       ndev->name);
-		}
 		return NETDEV_TX_BUSY;
 	}
-
-	mac_iocb_ptr = (struct ob_mac_iocb_req *)qdev->preq_q_curr;
+	tx_cb = &qdev->tx_buf[qdev->req_producer_index] ;
+	mac_iocb_ptr = tx_cb->queue_entry;
 	memset((void *)mac_iocb_ptr, 0, sizeof(struct ob_mac_iocb_req));
 	mac_iocb_ptr->opcode = qdev->mac_ob_opcode;
 	mac_iocb_ptr->flags |= qdev->mb_bit_mask;
-	mac_iocb_ptr->transaction_id = tx_buf_ptr->index;
+	mac_iocb_ptr->transaction_id = qdev->req_producer_index;
 	mac_iocb_ptr->data_len = cpu_to_le16((u16) skb->len);
-	tx_buf_ptr->skb = skb;
+	tx_cb->skb = skb;
 	map = pci_map_single(qdev->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
 	mac_iocb_ptr->buf_addr0_low = cpu_to_le32(LS_64BITS(map));
 	mac_iocb_ptr->buf_addr0_high = cpu_to_le32(MS_64BITS(map));
 	mac_iocb_ptr->buf_0_len = cpu_to_le32(skb->len | OB_MAC_IOCB_REQ_E);
-	pci_unmap_addr_set(tx_buf_ptr, mapaddr, map);
-	pci_unmap_len_set(tx_buf_ptr, maplen, skb->len);
-
-	/* Make sure all the descriptors written */
-	wmb();
+	pci_unmap_addr_set(tx_cb, mapaddr, map);
+	pci_unmap_len_set(tx_cb, maplen, skb->len);
+	atomic_dec(&qdev->tx_count);
 
 	qdev->req_producer_index++;
-	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES) {
+	if (qdev->req_producer_index == NUM_REQ_Q_ENTRIES)
 		qdev->req_producer_index = 0;
-		qdev->preq_q_curr = qdev->req_q_virt_addr;
-	} else {
-		qdev->preq_q_curr++;
-	}
-	ql_write_common_reg(qdev,
+	wmb();
+	ql_write_common_reg_l(qdev,
 			    (u32 *) & port_regs->CommonRegs.reqQProducerIndex,
 			    qdev->req_producer_index);
 
@@ -2009,7 +2070,7 @@ static int ql3xxx_send(struct sk_buff *s
 static int ql_alloc_net_req_rsp_queues(struct ql3_adapter *qdev)
 {
 	qdev->req_q_size =
-	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct net_req_iocb));
+	    (u32) (NUM_REQ_Q_ENTRIES * sizeof(struct ob_mac_iocb_req));
 
 	qdev->req_q_virt_addr =
 	    pci_alloc_consistent(qdev->pdev,
@@ -2291,23 +2352,17 @@ static int ql_alloc_large_buffers(struct
 
 static void ql_create_send_free_list(struct ql3_adapter *qdev)
 {
-	struct ql_tx_buf_cb *tx_buf_ptr;
+	struct ql_tx_buf_cb *tx_cb;
 	int i;
+	struct ob_mac_iocb_req *req_q_curr =
+					qdev->req_q_virt_addr;
 
 	/* Create free list of transmit buffers */
-
-	qdev->tx_free_count = 0;
-	qdev->tx_free_list = NULL;
-	qdev->tx_list_head = qdev->tx_list_tail = NULL;
-
-	for (i = 0; i < XMIT_CB_CNT; i++) {
-		tx_buf_ptr = &qdev->tx_buf[i];
-		tx_buf_ptr->type = QL_BUF_TYPE_MACIOCB;
-		tx_buf_ptr->index = i;
-		tx_buf_ptr->skb = NULL;
-		tx_buf_ptr->next = qdev->tx_free_list;
-		qdev->tx_free_list = tx_buf_ptr;
-		qdev->tx_free_count++;
+	for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
+		tx_cb = &qdev->tx_buf[i];
+		tx_cb->skb = NULL;
+		tx_cb->queue_entry = req_q_curr;
+		req_q_curr++;
 	}
 }
 
@@ -2385,7 +2440,6 @@ static int ql_alloc_mem_resources(struct
 	ql_init_large_buffers(qdev);
 	ql_create_send_free_list(qdev);
 
-	qdev->preq_q_curr = qdev->req_q_virt_addr;
 	qdev->rsp_current = qdev->rsp_q_virt_addr;
 
 	return 0;
@@ -2418,11 +2472,16 @@ static void ql_free_mem_resources(struct
 	}
 }
 
-static void ql_init_misc_registers(struct ql3_adapter *qdev)
+static int ql_init_misc_registers(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_local_ram_registers *local_ram =
 	    (struct ql3xxx_local_ram_registers *)qdev->mem_map_registers;
 
+	if(ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
+			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			 2) << 4))
+		return -1;
+
 	ql_write_page2_reg(qdev,
 			   &local_ram->bufletSize, qdev->nvram_data.bufletSize);
 
@@ -2467,17 +2526,21 @@ static void ql_init_misc_registers(struc
 	ql_write_page2_reg(qdev,
 			   &local_ram->maxDrbCount,
 			   qdev->nvram_data.drbTableSize);
+	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
+	return 0;
 }
 
 static int ql_adapter_initialize(struct ql3_adapter *qdev)
 {
 	u32 value;
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
-	struct ql3xxx_host_memory_registers *pHostMem =
+	struct ql3xxx_host_memory_registers __iomem *hmem_regs =
 	    (struct ql3xxx_host_memory_registers *)port_regs;
 	u32 delay = 10;
+	int status = 0;
 
-	ql_mii_setup(qdev);
+	if(ql_mii_setup(qdev))
+		return -1;
 
 	/* Bring out PHY out of reset */
 	ql_write_common_reg(qdev, &port_regs->CommonRegs.serialPortInterfaceReg,
@@ -2494,23 +2557,23 @@ static int ql_adapter_initialize(struct 
 
 	/* Request Queue Registers */
 	*((u32 *) (qdev->preq_consumer_index)) = 0;
-	qdev->req_count = NUM_REQ_Q_ENTRIES;
+	atomic_set(&qdev->tx_count,NUM_REQ_Q_ENTRIES);
 	qdev->req_producer_index = 0;
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->reqConsumerIndexAddrHigh,
+			   &hmem_regs->reqConsumerIndexAddrHigh,
 			   qdev->req_consumer_index_phy_addr_high);
 	ql_write_page1_reg(qdev,
-			   &pHostMem->reqConsumerIndexAddrLow,
+			   &hmem_regs->reqConsumerIndexAddrLow,
 			   qdev->req_consumer_index_phy_addr_low);
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->reqBaseAddrHigh,
+			   &hmem_regs->reqBaseAddrHigh,
 			   MS_64BITS(qdev->req_q_phy_addr));
 	ql_write_page1_reg(qdev,
-			   &pHostMem->reqBaseAddrLow,
+			   &hmem_regs->reqBaseAddrLow,
 			   LS_64BITS(qdev->req_q_phy_addr));
-	ql_write_page1_reg(qdev, &pHostMem->reqLength, NUM_REQ_Q_ENTRIES);
+	ql_write_page1_reg(qdev, &hmem_regs->reqLength, NUM_REQ_Q_ENTRIES);
 
 	/* Response Queue Registers */
 	*((u16 *) (qdev->prsp_producer_index)) = 0;
@@ -2518,50 +2581,50 @@ static int ql_adapter_initialize(struct 
 	qdev->rsp_current = qdev->rsp_q_virt_addr;
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rspProducerIndexAddrHigh,
+			   &hmem_regs->rspProducerIndexAddrHigh,
 			   qdev->rsp_producer_index_phy_addr_high);
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rspProducerIndexAddrLow,
+			   &hmem_regs->rspProducerIndexAddrLow,
 			   qdev->rsp_producer_index_phy_addr_low);
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rspBaseAddrHigh,
+			   &hmem_regs->rspBaseAddrHigh,
 			   MS_64BITS(qdev->rsp_q_phy_addr));
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rspBaseAddrLow,
+			   &hmem_regs->rspBaseAddrLow,
 			   LS_64BITS(qdev->rsp_q_phy_addr));
 
-	ql_write_page1_reg(qdev, &pHostMem->rspLength, NUM_RSP_Q_ENTRIES);
+	ql_write_page1_reg(qdev, &hmem_regs->rspLength, NUM_RSP_Q_ENTRIES);
 
 	/* Large Buffer Queue */
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rxLargeQBaseAddrHigh,
+			   &hmem_regs->rxLargeQBaseAddrHigh,
 			   MS_64BITS(qdev->lrg_buf_q_phy_addr));
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rxLargeQBaseAddrLow,
+			   &hmem_regs->rxLargeQBaseAddrLow,
 			   LS_64BITS(qdev->lrg_buf_q_phy_addr));
 
-	ql_write_page1_reg(qdev, &pHostMem->rxLargeQLength, NUM_LBUFQ_ENTRIES);
+	ql_write_page1_reg(qdev, &hmem_regs->rxLargeQLength, NUM_LBUFQ_ENTRIES);
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rxLargeBufferLength,
+			   &hmem_regs->rxLargeBufferLength,
 			   qdev->lrg_buffer_len);
 
 	/* Small Buffer Queue */
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rxSmallQBaseAddrHigh,
+			   &hmem_regs->rxSmallQBaseAddrHigh,
 			   MS_64BITS(qdev->small_buf_q_phy_addr));
 
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rxSmallQBaseAddrLow,
+			   &hmem_regs->rxSmallQBaseAddrLow,
 			   LS_64BITS(qdev->small_buf_q_phy_addr));
 
-	ql_write_page1_reg(qdev, &pHostMem->rxSmallQLength, NUM_SBUFQ_ENTRIES);
+	ql_write_page1_reg(qdev, &hmem_regs->rxSmallQLength, NUM_SBUFQ_ENTRIES);
 	ql_write_page1_reg(qdev,
-			   &pHostMem->rxSmallBufferLength,
+			   &hmem_regs->rxSmallBufferLength,
 			   QL_SMALL_BUFFER_SIZE);
 
 	qdev->small_buf_q_producer_index = NUM_SBUFQ_ENTRIES - 1;
@@ -2589,15 +2652,15 @@ static int ql_adapter_initialize(struct 
 	 * Find out if the chip has already been initialized.  If it has, then
 	 * we skip some of the initialization.
 	 */
-
-	ql_sem_spinlock(qdev, QL_DDR_RAM_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 4);
 	clear_bit(QL_LINK_MASTER, &qdev->flags);
 	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
 	if ((value & PORT_STATUS_IC) == 0) {
+
 		/* Chip has not been configured yet, so let it rip. */
-		ql_init_misc_registers(qdev);
+		if(ql_init_misc_registers(qdev)) {
+			status = -1;
+			goto out;
+		}
 
 		if (qdev->mac_index)
 			ql_write_page0_reg(qdev,
@@ -2613,9 +2676,12 @@ static int ql_adapter_initialize(struct 
 
 		value = (0xFFFF << 16) | qdev->nvram_data.extHwConfig;
 
-		ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
+		if(ql_sem_spinlock(qdev, QL_FLASH_SEM_MASK,
 				(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
-				 * 2) << 13);
+				 * 2) << 13)) {
+			status = -1;
+			goto out;
+		}
 		ql_write_page0_reg(qdev, &port_regs->ExternalHWConfig, value);
 		ql_write_page0_reg(qdev, &port_regs->InternalChipConfig,
 				   (((INTERNAL_CHIP_SD | INTERNAL_CHIP_WE) <<
@@ -2624,16 +2690,17 @@ static int ql_adapter_initialize(struct 
 		ql_sem_unlock(qdev, QL_FLASH_SEM_MASK);
 	}
 
-	ql_sem_unlock(qdev, QL_DDR_RAM_SEM_MASK);
 
-	ql_init_scan_mode(qdev);
+	if(ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
+			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
+			 2) << 7)) {
+		status = -1;
+		goto out;
+	}
 
+	ql_init_scan_mode(qdev);
 	ql_get_phy_owner(qdev);
 
-	ql_sem_spinlock(qdev, QL_PHY_GIO_SEM_MASK,
-			(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) *
-			 2) << 7);
-
 	/* Load the MAC Configuration */
 
 	/* Program lower 32 bits of the MAC address */
@@ -2685,7 +2752,8 @@ static int ql_adapter_initialize(struct 
 	if (delay == 0) {
 		printk(KERN_ERR PFX
 		       "%s: Hw Initialization timeout.\n", qdev->ndev->name);
-		return 1;
+		status = -1;
+		goto out;
 	}
 
 	/* Enable Ethernet Function */
@@ -2694,9 +2762,14 @@ static int ql_adapter_initialize(struct 
 	     PORT_CONTROL_HH);
 	ql_write_page0_reg(qdev, &port_regs->portControl,
 			   ((value << 16) | value));
-	return 0;
+
+out:
+	return status;
 }
 
+/*
+ * Caller holds hw_lock.
+ */
 static int ql_adapter_reset(struct ql3_adapter *qdev)
 {
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
@@ -2788,7 +2861,7 @@ static void ql_set_mac_info(struct ql3_a
 
 	/* Get the function number */
 	value =
-	    ql_read_common_reg(qdev, &port_regs->CommonRegs.ispControlStatus);
+	    ql_read_common_reg_l(qdev, &port_regs->CommonRegs.ispControlStatus);
 	func_number = (u8) ((value >> 4) & OPCODE_FUNC_ID_MASK);
 	port_status = ql_read_page0_reg(qdev, &port_regs->portStatus);
 	switch (value & ISP_CONTROL_FN_MASK) {
@@ -2865,6 +2938,7 @@ static void ql_display_dev_info(struct n
 static int ql_adapter_down(struct ql3_adapter *qdev, int do_reset)
 {
 	struct net_device *ndev = qdev->ndev;
+	int retval = 0;
 
 	netif_stop_queue(ndev);
 	netif_carrier_off(ndev);
@@ -2889,6 +2963,9 @@ static int ql_adapter_down(struct ql3_ad
 
 	if (do_reset) {
 		int soft_reset;
+		unsigned long hw_flags;
+
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 		if (ql_wait_for_drvr_lock(qdev)) {
 			if ((soft_reset = ql_adapter_reset(qdev))) {
 				printk(KERN_ERR PFX
@@ -2901,17 +2978,20 @@ static int ql_adapter_down(struct ql3_ad
 			printk(KERN_ERR PFX
 			       "%s: Could not acquire driver lock to do "
 			       "reset!\n", ndev->name);
-			return -1;
+			retval = -1;
 		}
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	}
 	ql_free_mem_resources(qdev);
-	return 0;
+	return retval;
 }
 
 static int ql_adapter_up(struct ql3_adapter *qdev)
 {
 	struct net_device *ndev = qdev->ndev;
 	int err;
+	unsigned long irq_flags = SA_SAMPLE_RANDOM | SA_SHIRQ;
+	unsigned long hw_flags;
 
 	if (ql_alloc_mem_resources(qdev)) {
 		printk(KERN_ERR PFX
@@ -2929,18 +3009,21 @@ static int ql_adapter_up(struct ql3_adap
 		} else {
 			printk(KERN_INFO PFX "%s: MSI Enabled...\n", qdev->ndev->name);
 			set_bit(QL_MSI_ENABLED,&qdev->flags);
+			irq_flags &= ~SA_SHIRQ;
 		}
 	}
 
 	if ((err = request_irq(qdev->pdev->irq,
 			       ql3xxx_isr,
-			       SA_INTERRUPT | SA_SHIRQ, ndev->name, ndev))) {
+			       irq_flags, ndev->name, ndev))) {
 		printk(KERN_ERR PFX
 		       "%s: Failed to reserve interrupt %d already in use.\n",
 		       ndev->name, qdev->pdev->irq);
 		goto err_irq;
 	}
 
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+
 	if ((err = ql_wait_for_drvr_lock(qdev))) {
 		if ((err = ql_adapter_initialize(qdev))) {
 			printk(KERN_ERR PFX
@@ -2958,6 +3041,8 @@ static int ql_adapter_up(struct ql3_adap
 		goto err_lock;
 	}
 
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
 	set_bit(QL_ADAPTER_UP,&qdev->flags);
 
 	mod_timer(&qdev->adapter_timer, jiffies + HZ * 1);
@@ -3055,6 +3140,7 @@ static int ql3xxx_set_mac_address(struct
 	struct ql3xxx_port_registers __iomem *port_regs =
 	    		qdev->mem_map_registers;
 	struct sockaddr *addr = p;
+	unsigned long hw_flags;
 
 	if (netif_running(ndev))
 		return -EBUSY;
@@ -3064,6 +3150,7 @@ static int ql3xxx_set_mac_address(struct
 
 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
 
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 	/* Program lower 32 bits of the MAC address */
 	ql_write_page0_reg(qdev, &port_regs->macAddrIndirectPtrReg,
 			   (MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16));
@@ -3077,6 +3164,7 @@ static int ql3xxx_set_mac_address(struct
 			   ((MAC_ADDR_INDIRECT_PTR_REG_RP_MASK << 16) | 1));
 	ql_write_page0_reg(qdev, &port_regs->macAddrDataReg,
 			   ((ndev->dev_addr[0] << 8) | ndev->dev_addr[1]));
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
 	return 0;
 }
@@ -3104,6 +3192,7 @@ static void ql_reset_work(struct ql3_ada
 	struct ql_tx_buf_cb *tx_cb;
 	int max_wait_time, i;
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
+	unsigned long hw_flags;
 
 	if (test_bit((QL_RESET_PER_SCSI | QL_RESET_START),&qdev->flags)) {
 		clear_bit(QL_LINK_MASTER,&qdev->flags);
@@ -3111,30 +3200,24 @@ static void ql_reset_work(struct ql3_ada
 		/*
 		 * Loop through the active list and return the skb.
 		 */
-		spin_lock(&qdev->tx_cb_lock);
-		tx_cb = qdev->tx_list_head;
-		while (tx_cb != NULL) {
-			if (tx_cb->skb) {
-				dev_kfree_skb(tx_cb->skb);
-				tx_cb->skb = NULL;
-			}
-			tx_cb = tx_cb->next;
-		}
-		for (i = 0; i < XMIT_CB_CNT; i++) {
+		for (i = 0; i < NUM_REQ_Q_ENTRIES; i++) {
 			tx_cb = &qdev->tx_buf[i];
 			if (tx_cb->skb) {
 
 				printk(KERN_DEBUG PFX
 				       "%s: Freeing lost SKB.\n",
 				       qdev->ndev->name);
+				pci_unmap_single(qdev->pdev,
+					pci_unmap_addr(tx_cb, mapaddr),
+					pci_unmap_len(tx_cb, maplen), PCI_DMA_TODEVICE);
 				dev_kfree_skb(tx_cb->skb);
 				tx_cb->skb = NULL;
 			}
 		}
-		spin_unlock(&qdev->tx_cb_lock);
 
 		printk(KERN_ERR PFX
 		       "%s: Clearing NRI after reset.\n", qdev->ndev->name);
+		spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 		ql_write_common_reg(qdev,
 				    &port_regs->CommonRegs.
 				    ispControlStatus,
@@ -3146,6 +3229,7 @@ static void ql_reset_work(struct ql3_ada
 		do {
 			value = ql_read_common_reg(qdev,
 						   &port_regs->CommonRegs.
+
 						   ispControlStatus);
 			if ((value & ISP_CONTROL_SR) == 0) {
 				printk(KERN_DEBUG PFX
@@ -3169,6 +3253,7 @@ static void ql_reset_work(struct ql3_ada
 
 			ssleep(1);
 		} while (--max_wait_time);
+		spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
 		if (value & ISP_CONTROL_SR) {
 
@@ -3190,9 +3275,6 @@ static void ql_reset_work(struct ql3_ada
 		clear_bit(QL_RESET_ACTIVE,&qdev->flags);
 		clear_bit(QL_RESET_PER_SCSI,&qdev->flags);
 		clear_bit(QL_RESET_START,&qdev->flags);
-		printk(KERN_DEBUG PFX
-		       "%s: Finishing reset, calling ql_cycle_adapter() and "
-		       "ql_adapter_up().\n", ndev->name);
 		ql_cycle_adapter(qdev,QL_NO_RESET);
 	}
 }
@@ -3207,7 +3289,7 @@ static void ql_get_board_info(struct ql3
 	struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 	u32 value;
 
-	value = ql_read_page0_reg(qdev, &port_regs->portStatus);
+	value = ql_read_page0_reg_l(qdev, &port_regs->portStatus);
 
 	qdev->chip_rev_id = ((value & PORT_STATUS_REV_ID_MASK) >> 12);
 	if (value & PORT_STATUS_64)
@@ -3311,7 +3393,6 @@ static int __devinit ql3xxx_probe(struct
 
 	spin_lock_init(&qdev->adapter_lock);
 	spin_lock_init(&qdev->hw_lock);
-	spin_lock_init(&qdev->tx_cb_lock);
 
 	/* Set driver entry points */
 	ndev->open = ql3xxx_open;
@@ -3328,7 +3409,7 @@ static int __devinit ql3xxx_probe(struct
 	ndev->poll = &ql_poll;
 	ndev->weight = 64;
 
-	qdev->irq = ndev->irq = pdev->irq;
+	ndev->irq = pdev->irq;
 
 	/* make sure the EEPROM is good */
 	if (ql_get_nvram_params(qdev)) {
diff -puN drivers/net/qla3xxx.h~qla3xxx-nic-driver-updates-2 drivers/net/qla3xxx.h
--- a/drivers/net/qla3xxx.h~qla3xxx-nic-driver-updates-2
+++ a/drivers/net/qla3xxx.h
@@ -255,10 +255,6 @@ struct ib_tcp_iocb_rsp {
 	__le32 ial_high;
 };
 
-struct net_req_iocb {
-	u8 byte[64];
-};
-
 struct net_rsp_iocb {
 	u8 opcode;
 	u8 flags;
@@ -986,7 +982,6 @@ struct eeprom_data {
 #define QL_ADDR_ELE_PER_BUFQ_ENTRY \
 (sizeof(struct lrg_buf_q_entry) / sizeof(struct bufq_addr_element))
     /* Each send has at least control block.  This is how many we keep. */
-#define XMIT_CB_CNT    	NUM_REQ_Q_ENTRIES
 #define NUM_SMALL_BUFFERS     	NUM_SBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
 #define NUM_LARGE_BUFFERS     	NUM_LBUFQ_ENTRIES * QL_ADDR_ELE_PER_BUFQ_ENTRY
 #define QL_HEADER_SPACE 32	/* make header space at top of skb. */
@@ -1044,18 +1039,10 @@ struct ql_rcv_buf_cb {
 };
 
 struct ql_tx_buf_cb {
-	struct ql_tx_buf_cb *next;	/* next buffer */
-	u32 type;
-	u32 index;
 	struct sk_buff *skb;
+	struct ob_mac_iocb_req *queue_entry ;
 	 DECLARE_PCI_UNMAP_ADDR(mapaddr);
 	 DECLARE_PCI_UNMAP_LEN(maplen);
-	union {
-		struct ob_tcp_iocb_req tcp_iocb;
-		struct ob_ip_iocb_req ip_iocb;
-		struct ob_mac_iocb_req mac_iocb;
-	} u;
-
 };
 
 /* definitions for type field */
@@ -1110,8 +1097,6 @@ struct ql3_adapter {
 	struct ql3xxx_port_registers __iomem *mem_map_registers;
 	u32 current_page;	/* tracks current register page */
 
-	int irq;
-
 	u32 msg_enable;
 	u8 reserved_01[2];
 	u8 reserved_02[2];
@@ -1123,29 +1108,15 @@ struct ql3_adapter {
 	/* Net Request Queue */
 	u32 req_q_size;
 	u32 reserved_03;
-	struct net_req_iocb *req_q_virt_addr;
+	struct ob_mac_iocb_req *req_q_virt_addr;
 	dma_addr_t req_q_phy_addr;
-	struct net_req_iocb *preq_q_curr;
 	u16 req_producer_index;
 	u16 reserved_04;
 	u16 *preq_consumer_index;
 	u32 req_consumer_index_phy_addr_high;
 	u32 req_consumer_index_phy_addr_low;
-	unsigned int req_count;
-
-	u32 tx_count;
-
-	/* Transmit Buffers */
-	spinlock_t tx_cb_lock;
-	u32 tx_free_count;
-	struct ql_tx_buf_cb *tx_free_list;
-	struct ql_tx_buf_cb *tx_list_head;
-	struct ql_tx_buf_cb *tx_list_tail;
-
-	struct ql_tx_buf_cb tx_buf[XMIT_CB_CNT];
-	void *tx_buf_virt_addr;
-	dma_addr_t tx_buf_phy_addr;
-	u32 tx_buf_tot_size_in_bytes;
+	atomic_t tx_count;
+	struct ql_tx_buf_cb tx_buf[NUM_REQ_Q_ENTRIES];
 
 	/* Net Response Queue */
 	u32 rsp_q_size;
_

Patches currently in -mm which might be from ron.mercer@xxxxxxxxxx are

qla3xxx-NIC-driver.patch
qla3xxx-nic-driver-updates-2.patch

-
To unsubscribe from this list: send the line "unsubscribe mm-commits" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at  http://vger.kernel.org/majordomo-info.html

[Index of Archives]     [Kernel Newbies FAQ]     [Kernel Archive]     [IETF Annouce]     [DCCP]     [Netdev]     [Networking]     [Security]     [Bugtraq]     [Photo]     [Yosemite]     [MIPS Linux]     [ARM Linux]     [Linux Security]     [Linux RAID]     [Linux SCSI]

  Powered by Linux