+ forcedeth-add-support-for-configuration.patch added to -mm tree

The patch titled

     forcedeth: add support for configuration

has been added to the -mm tree.  Its filename is

     forcedeth-add-support-for-configuration.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this.


From: Manfred Spraul <manfred@xxxxxxxxxxxxxxxx>

Add support for run-time configuration of various parameters, both through
module parameters and through ethtool commands.  This covers rx/tx ring
sizes, speed/duplex and autonegotiation, flow control, checksum offload,
scatter-gather and TSO, MSI/MSI-X, wake-on-LAN, 802.1pq tagging, hardware
statistics and diagnostic self-tests.
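
The module_param registrations themselves sit in a hunk of the patch that
is not reproduced in this excerpt.  Assuming the knobs are exported under
the same names as the static variables added below, the registrations
would follow the usual kernel pattern, roughly (hypothetical sketch, not
the patch's actual lines):

	/* hypothetical sketch: parameter names assumed to match the
	 * static variables introduced below; the real registrations
	 * are in a hunk not shown in this excerpt */
	module_param(autoneg, int, 0);
	MODULE_PARM_DESC(autoneg, "PHY autonegotiation (0: disable, 1: enable)");
	module_param(tx_ring_size, int, 0);
	MODULE_PARM_DESC(tx_ring_size, "Tx ring size (default 64)");
	module_param(rx_ring_size, int, 0);
	MODULE_PARM_DESC(rx_ring_size, "Rx ring size (default 128)");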

Signed-off-by: Ayaz Abdulla <aabdulla@xxxxxxxxxx>
Signed-off-by: Manfred Spraul <manfred@xxxxxxxxxxxxxxxx>
Signed-off-by: Andrew Morton <akpm@xxxxxxxx>
---

 drivers/net/forcedeth.c | 2203 ++++++++++++++++++++++++++++++--------
 1 files changed, 1763 insertions(+), 440 deletions(-)
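
As a quick orientation before the diff: the new ethtool hooks added below
correspond roughly to the following standard ethtool invocations (eth0 is
a placeholder interface name; the mapping is inferred from the handlers
visible in the diff):

	ethtool -G eth0 rx 256 tx 128          # nv_set_ringparam: resize the rings
	ethtool -A eth0 autoneg on rx on tx on # nv_set_pauseparam: flow control
	ethtool -S eth0                        # statistics named in nv_estats_str
	ethtool -d eth0                        # nv_get_regs: dump the register window
	ethtool -r eth0                        # nv_nway_reset: restart autonegotiation
	ethtool -t eth0 offline                # self-tests named in nv_etests_str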

diff -puN drivers/net/forcedeth.c~forcedeth-add-support-for-configuration drivers/net/forcedeth.c
--- devel/drivers/net/forcedeth.c~forcedeth-add-support-for-configuration	2006-04-18 22:44:35.000000000 -0700
+++ devel-akpm/drivers/net/forcedeth.c	2006-04-18 22:44:35.000000000 -0700
@@ -106,6 +106,7 @@
  *	0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
  *	0.52: 20 Jan 2006: Add MSI/MSIX support.
  *	0.53: 20 Jan 2006: Add flow control (pause frame).
+ *	0.54: 20 Jan 2006: Additional ethtool and moduleparam support.
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -117,7 +118,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.53"
+#define FORCEDETH_VERSION		"0.54"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -136,6 +137,7 @@
 #include <linux/init.h>
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
+#include <linux/rtnetlink.h>
 
 #include <asm/irq.h>
 #include <asm/io.h>
@@ -162,6 +164,8 @@
 #define DEV_HAS_MSI             0x0040  /* device supports MSI */
 #define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
 #define DEV_HAS_PAUSEFRAME_TX   0x0100  /* device supports tx pause frames */
+#define DEV_HAS_STATISTICS      0x0200  /* device supports hw statistics */
+#define DEV_HAS_TEST_EXTENDED   0x0400  /* device supports extended diagnostic test */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -216,6 +220,7 @@ enum {
 #define NVREG_PFF_ALWAYS	0x7F0000
 #define NVREG_PFF_PROMISC	0x80
 #define NVREG_PFF_MYADDR	0x20
+#define NVREG_PFF_LOOPBACK	0x10
 
 	NvRegOffloadConfig = 0x90,
 #define NVREG_OFFLOAD_HOMEPHY	0x601
@@ -328,6 +333,34 @@ enum {
 #define NVREG_POWERSTATE_D1		0x0001
 #define NVREG_POWERSTATE_D2		0x0002
 #define NVREG_POWERSTATE_D3		0x0003
+	NvRegTxCnt = 0x280,
+	NvRegTxZeroReXmt = 0x284,
+	NvRegTxOneReXmt = 0x288,
+	NvRegTxManyReXmt = 0x28c,
+	NvRegTxLateCol = 0x290,
+	NvRegTxUnderflow = 0x294,
+	NvRegTxLossCarrier = 0x298,
+	NvRegTxExcessDef = 0x29c,
+	NvRegTxRetryErr = 0x2a0,
+	NvRegRxFrameErr = 0x2a4,
+	NvRegRxExtraByte = 0x2a8,
+	NvRegRxLateCol = 0x2ac,
+	NvRegRxRunt = 0x2b0,
+	NvRegRxFrameTooLong = 0x2b4,
+	NvRegRxOverflow = 0x2b8,
+	NvRegRxFCSErr = 0x2bc,
+	NvRegRxFrameAlignErr = 0x2c0,
+	NvRegRxLenErr = 0x2c4,
+	NvRegRxUnicast = 0x2c8,
+	NvRegRxMulticast = 0x2cc,
+	NvRegRxBroadcast = 0x2d0,
+	NvRegTxDef = 0x2d4,
+	NvRegTxFrame = 0x2d8,
+	NvRegRxCnt = 0x2dc,
+	NvRegTxPause = 0x2e0,
+	NvRegRxPause = 0x2e4,
+	NvRegRxDropFrame = 0x2e8,
+
 	NvRegVlanControl = 0x300,
 #define NVREG_VLANCONTROL_ENABLE	0x2000
 	NvRegMSIXMap0 = 0x3e0,
@@ -446,16 +479,18 @@ typedef union _ring_type {
 /* General driver defaults */
 #define NV_WATCHDOG_TIMEO	(5*HZ)
 
-#define RX_RING		128
-#define TX_RING		256
+#define RX_RING_DEFAULT		128
+#define TX_RING_DEFAULT		64
+#define RX_RING_MIN		RX_RING_DEFAULT
+#define TX_RING_MIN		TX_RING_DEFAULT
+#define RING_MAX_DESC_VER_1	1024
+#define RING_MAX_DESC_VER_2_3	16384
 /* 
- * If your nic mysteriously hangs then try to reduce the limits
- * to 1/0: It might be required to set NV_TX_LASTPACKET in the
- * last valid ring entry. But this would be impossible to
- * implement - probably a disassembly error.
+ * Difference between the get and put pointers for the tx ring.
+ * This is used to throttle the amount of data outstanding in the
+ * tx ring.
  */
-#define TX_LIMIT_STOP	255
-#define TX_LIMIT_START	254
+#define TX_LIMIT_DIFFERENCE	1
 
 /* rx/tx mac addr + type + vlan + align + slack*/
 #define NV_RX_HEADERS		(64)
@@ -469,6 +504,7 @@ typedef union _ring_type {
 #define OOM_REFILL	(1+HZ/20)
 #define POLL_WAIT	(1+HZ/100)
 #define LINK_TIMEOUT	(3*HZ)
+#define STATS_INTERVAL	(10*HZ)
 
 /* 
  * desc_ver values:
@@ -507,6 +543,9 @@ typedef union _ring_type {
 #define NV_PAUSEFRAME_TX_CAPABLE 0x0002
 #define NV_PAUSEFRAME_RX_ENABLE  0x0004
 #define NV_PAUSEFRAME_TX_ENABLE  0x0008
+#define NV_PAUSEFRAME_RX_REQ     0x0010
+#define NV_PAUSEFRAME_TX_REQ     0x0020
+#define NV_PAUSEFRAME_AUTONEG    0x0040
 
 /* MSI/MSI-X defines */
 #define NV_MSI_X_MAX_VECTORS  8
@@ -521,6 +560,109 @@ typedef union _ring_type {
 #define NV_MSI_X_VECTOR_TX    0x1
 #define NV_MSI_X_VECTOR_OTHER 0x2
 
+/* statistics */
+#define NV_STATS_COUNT_SW 10
+
+struct nv_ethtool_str {
+	char name[ETH_GSTRING_LEN];
+};
+
+static const struct nv_ethtool_str nv_estats_str[] = {
+	{ "tx_dropped" },
+	{ "tx_fifo_errors" },
+	{ "tx_carrier_errors" },
+	{ "tx_packets" },
+	{ "tx_bytes" },
+	{ "rx_crc_errors" },
+	{ "rx_over_errors" },
+	{ "rx_errors_total" },
+	{ "rx_packets" },
+	{ "rx_bytes" },
+
+	/* hardware counters */
+	{ "tx_zero_rexmt" },
+	{ "tx_one_rexmt" },
+	{ "tx_many_rexmt" },
+	{ "tx_late_collision" },
+	{ "tx_excess_deferral" },
+	{ "tx_retry_error" },
+	{ "rx_frame_error" },
+	{ "rx_extra_byte" },
+	{ "rx_late_collision" },
+	{ "rx_runt" },
+	{ "rx_frame_too_long" },
+	{ "rx_frame_align_error" },
+	{ "rx_length_error" },
+	{ "rx_unicast" },
+	{ "rx_multicast" },
+	{ "rx_broadcast" },
+	{ "tx_deferral" },
+	{ "tx_pause" },
+	{ "rx_pause" },
+	{ "rx_drop_frame" }
+};
+
+struct nv_ethtool_stats {
+	u64 tx_dropped;
+	u64 tx_fifo_errors;
+	u64 tx_carrier_errors;
+	u64 tx_packets;
+	u64 tx_bytes;
+	u64 rx_crc_errors;
+	u64 rx_over_errors;
+	u64 rx_errors_total;
+	u64 rx_packets;
+	u64 rx_bytes;
+
+	/* hardware counters */
+	u64 tx_zero_rexmt;
+	u64 tx_one_rexmt;
+	u64 tx_many_rexmt;
+	u64 tx_late_collision;
+	u64 tx_excess_deferral;
+	u64 tx_retry_error;
+	u64 rx_frame_error;
+	u64 rx_extra_byte;
+	u64 rx_late_collision;
+	u64 rx_runt;
+	u64 rx_frame_too_long;
+	u64 rx_frame_align_error;
+	u64 rx_length_error;
+	u64 rx_unicast;
+	u64 rx_multicast;
+	u64 rx_broadcast;
+	u64 tx_deferral;
+	u64 tx_pause;
+	u64 rx_pause;
+	u64 rx_drop_frame;
+};
+
+/* diagnostics */
+#define NV_TEST_COUNT_BASE 3
+#define NV_TEST_COUNT_EXTENDED 4
+
+static const struct nv_ethtool_str nv_etests_str[] = {
+	{ "link      (online/offline)" },
+	{ "register  (offline)       " },
+	{ "interrupt (offline)       " },
+	{ "loopback  (offline)       " }
+};
+
+struct register_test {
+	u32 reg;
+	u32 mask;
+};
+
+static const struct register_test nv_registers_test[] = {
+	{ NvRegUnknownSetupReg6, 0x01 },
+	{ NvRegMisc1, 0x03c },
+	{ NvRegOffloadConfig, 0x03ff },
+	{ NvRegMulticastAddrA, 0xffffffff },
+	{ NvRegUnknownSetupReg3, 0x0ff },
+	{ NvRegWakeUpFlags, 0x07777 },
+	{ 0,0 }
+};
+
 /*
  * SMP locking:
  * All hardware access under dev->priv->lock, except the performance
@@ -539,6 +681,7 @@ struct fe_priv {
 	/* General data:
 	 * Locking: spin_lock(&np->lock); */
 	struct net_device_stats stats;
+	struct nv_ethtool_stats estats;
 	int in_shutdown;
 	u32 linkspeed;
 	int duplex;
@@ -548,6 +691,7 @@ struct fe_priv {
 	int wolenabled;
 	unsigned int phy_oui;
 	u16 gigabit;
+	int intr_test;
 
 	/* General data: RO fields */
 	dma_addr_t ring_addr;
@@ -557,6 +701,7 @@ struct fe_priv {
 	u32 desc_ver;
 	u32 txrxctl_bits;
 	u32 vlanctl_bits;
+	u32 driver_data;
 
 	void __iomem *base;
 
@@ -565,13 +710,15 @@ struct fe_priv {
 	 */
 	ring_type rx_ring;
 	unsigned int cur_rx, refill_rx;
-	struct sk_buff *rx_skbuff[RX_RING];
-	dma_addr_t rx_dma[RX_RING];
+	struct sk_buff **rx_skbuff;
+	dma_addr_t *rx_dma;
 	unsigned int rx_buf_sz;
 	unsigned int pkt_limit;
 	struct timer_list oom_kick;
 	struct timer_list nic_poll;
+	struct timer_list stats_poll;
 	u32 nic_poll_irq;
+	int rx_ring_size;
 
 	/* media detection workaround.
 	 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -583,10 +730,13 @@ struct fe_priv {
 	 */
 	ring_type tx_ring;
 	unsigned int next_tx, nic_tx;
-	struct sk_buff *tx_skbuff[TX_RING];
-	dma_addr_t tx_dma[TX_RING];
-	unsigned int tx_dma_len[TX_RING];
+	struct sk_buff **tx_skbuff;
+	dma_addr_t *tx_dma;
+	unsigned int *tx_dma_len;
 	u32 tx_flags;
+	int tx_ring_size;
+	int tx_limit_start;
+	int tx_limit_stop;
 
 	/* vlan fields */
 	struct vlan_group *vlangrp;
@@ -611,8 +761,10 @@ static int max_interrupt_work = 5;
  * Throughput Mode: Every tx and rx packet will generate an interrupt.
  * CPU Mode: Interrupts are controlled by a timer.
  */
-#define NV_OPTIMIZATION_MODE_THROUGHPUT 0
-#define NV_OPTIMIZATION_MODE_CPU        1
+enum {
+	NV_OPTIMIZATION_MODE_THROUGHPUT,
+	NV_OPTIMIZATION_MODE_CPU
+};
 static int optimization_mode = NV_OPTIMIZATION_MODE_THROUGHPUT;
 
 /*
@@ -625,14 +777,136 @@ static int optimization_mode = NV_OPTIMI
 static int poll_interval = -1;
 
 /*
- * Disable MSI interrupts
+ * MSI interrupts
+ */
+enum {
+	NV_MSI_INT_DISABLED,
+	NV_MSI_INT_ENABLED
+};
+static int msi = NV_MSI_INT_ENABLED;
+
+/*
+ * MSIX interrupts
+ */
+enum {
+	NV_MSIX_INT_DISABLED,
+	NV_MSIX_INT_ENABLED
+};
+static int msix = NV_MSIX_INT_ENABLED;
+
+/*
+ * PHY Speed and Duplex
+ */
+enum {
+	NV_SPEED_DUPLEX_AUTO,
+	NV_SPEED_DUPLEX_10_HALF_DUPLEX,
+	NV_SPEED_DUPLEX_10_FULL_DUPLEX,
+	NV_SPEED_DUPLEX_100_HALF_DUPLEX,
+	NV_SPEED_DUPLEX_100_FULL_DUPLEX,
+	NV_SPEED_DUPLEX_1000_FULL_DUPLEX
+};
+static int speed_duplex = NV_SPEED_DUPLEX_AUTO;
+
+/*
+ * PHY autonegotiation
+ */
+static int autoneg = AUTONEG_ENABLE;
+
+/*
+ * Scatter gather
+ */
+enum {
+	NV_SCATTER_GATHER_DISABLED,
+	NV_SCATTER_GATHER_ENABLED
+};
+static int scatter_gather = NV_SCATTER_GATHER_ENABLED;
+
+/*
+ * TCP Segmentation Offload (TSO)
+ */
+enum {
+	NV_TSO_DISABLED,
+	NV_TSO_ENABLED
+};
+static int tso_offload = NV_TSO_ENABLED;
+
+/*
+ * MTU settings
+ */
+static int mtu = ETH_DATA_LEN;
+
+/*
+ * Tx checksum offload
+ */
+enum {
+	NV_TX_CHECKSUM_DISABLED,
+	NV_TX_CHECKSUM_ENABLED
+};
+static int tx_checksum_offload = NV_TX_CHECKSUM_ENABLED;
+
+/*
+ * Rx checksum offload
+ */
+enum {
+	NV_RX_CHECKSUM_DISABLED,
+	NV_RX_CHECKSUM_ENABLED
+};
+static int rx_checksum_offload = NV_RX_CHECKSUM_ENABLED;
+
+/*
+ * Tx ring size
+ */
+static int tx_ring_size = TX_RING_DEFAULT;
+
+/*
+ * Rx ring size
+ */
+static int rx_ring_size = RX_RING_DEFAULT;
+
+/*
+ * Tx flow control
  */
-static int disable_msi = 0;
+enum {
+	NV_TX_FLOW_CONTROL_DISABLED,
+	NV_TX_FLOW_CONTROL_ENABLED
+};
+static int tx_flow_control = NV_TX_FLOW_CONTROL_ENABLED;
+
+/*
+ * Rx flow control
+ */
+enum {
+	NV_RX_FLOW_CONTROL_DISABLED,
+	NV_RX_FLOW_CONTROL_ENABLED
+};
+static int rx_flow_control = NV_RX_FLOW_CONTROL_ENABLED;
+
+/*
+ * DMA 64bit
+ */
+enum {
+	NV_DMA_64BIT_DISABLED,
+	NV_DMA_64BIT_ENABLED
+};
+static int dma_64bit = NV_DMA_64BIT_ENABLED;
+
+/*
+ * Wake On Lan
+ */
+enum {
+	NV_WOL_DISABLED,
+	NV_WOL_ENABLED
+};
+static int wol = NV_WOL_DISABLED;
 
 /*
- * Disable MSIX interrupts
+ * Tagging 802.1pq
  */
-static int disable_msix = 0;
+enum {
+	NV_8021PQ_DISABLED,
+	NV_8021PQ_ENABLED
+};
+static int tagging_8021pq = NV_8021PQ_ENABLED;
 
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
@@ -692,7 +966,7 @@ static void setup_hw_rings(struct net_de
 			writel((u32) cpu_to_le64(np->ring_addr), base + NvRegRxRingPhysAddr);
 		}
 		if (rxtx_flags & NV_SETUP_TX_RING) {
-			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
+			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
 		}
 	} else {
 		if (rxtx_flags & NV_SETUP_RX_RING) {
@@ -700,12 +974,88 @@ static void setup_hw_rings(struct net_de
 			writel((u32) (cpu_to_le64(np->ring_addr) >> 32), base + NvRegRxRingPhysAddrHigh);
 		}
 		if (rxtx_flags & NV_SETUP_TX_RING) {
-			writel((u32) cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
-			writel((u32) (cpu_to_le64(np->ring_addr + RX_RING*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
+			writel((u32) cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)), base + NvRegTxRingPhysAddr);
+			writel((u32) (cpu_to_le64(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc_ex)) >> 32), base + NvRegTxRingPhysAddrHigh);
 		}
 	}
 }
 
+static void free_rings(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		if(np->rx_ring.orig)
+			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
+					    np->rx_ring.orig, np->ring_addr);
+	} else {
+		if (np->rx_ring.ex)
+			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
+					    np->rx_ring.ex, np->ring_addr);
+	}
+	if (np->rx_skbuff)
+		kfree(np->rx_skbuff);
+	if (np->rx_dma)
+		kfree(np->rx_dma);
+	if (np->tx_skbuff)
+		kfree(np->tx_skbuff);
+	if (np->tx_dma)
+		kfree(np->tx_dma);
+	if (np->tx_dma_len)
+		kfree(np->tx_dma_len);
+}
+
+static void nv_enable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		enable_irq(dev->irq);
+	} else {
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+static void nv_disable_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+	} else {
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+		disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+	}
+}
+
+static void nv_enable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+
+	writel(mask, base + NvRegIrqMask);
+}
+
+static void nv_disable_hw_interrupts(struct net_device *dev, u32 mask)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		writel(mask, base + NvRegIrqMask);
+	} else {
+		if (np->msi_flags & NV_MSI_ENABLED)
+			writel(0, base + NvRegMSIIrqMask);
+		writel(0, base + NvRegIrqMask);
+	}
+}
+
 #define MII_READ	(-1)
 /* mii_rw: read/write a register on the PHY.
  *
@@ -789,7 +1139,23 @@ static int phy_init(struct net_device *d
 
 	/* set advertise register */
 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
+	reg &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+	if (speed_duplex == NV_SPEED_DUPLEX_AUTO)
+		reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL);
+	if (speed_duplex == NV_SPEED_DUPLEX_10_HALF_DUPLEX)
+		reg |= ADVERTISE_10HALF;
+	if (speed_duplex == NV_SPEED_DUPLEX_10_FULL_DUPLEX)
+		reg |= ADVERTISE_10FULL;
+	if (speed_duplex == NV_SPEED_DUPLEX_100_HALF_DUPLEX)
+		reg |= ADVERTISE_100HALF;
+	if (speed_duplex == NV_SPEED_DUPLEX_100_FULL_DUPLEX)
+		reg |= ADVERTISE_100FULL;
+	if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+		reg |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+	if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+		reg |= ADVERTISE_PAUSE_ASYM;
+	np->fixed_mode = reg;
+
 	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
 		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
 		return PHY_ERROR;
@@ -804,11 +1170,15 @@ static int phy_init(struct net_device *d
 		np->gigabit = PHY_GIGABIT;
 		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 		mii_control_1000 &= ~ADVERTISE_1000HALF;
-		if (phyinterface & PHY_RGMII)
+		if (phyinterface & PHY_RGMII &&
+		    (speed_duplex == NV_SPEED_DUPLEX_AUTO ||
+		     (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_ENABLE)))
 			mii_control_1000 |= ADVERTISE_1000FULL;
-		else
+		else {
+			if (speed_duplex == NV_SPEED_DUPLEX_1000_FULL_DUPLEX && autoneg == AUTONEG_DISABLE)
+				printk(KERN_INFO "%s: 1000 Mbps full duplex only allowed with autoneg\n", pci_name(np->pci_dev));
 			mii_control_1000 &= ~ADVERTISE_1000FULL;
-
+		}
 		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 			return PHY_ERROR;
@@ -817,6 +1187,21 @@ static int phy_init(struct net_device *d
 	else
 		np->gigabit = 0;
 
+	if (autoneg == AUTONEG_DISABLE){
+		np->pause_flags &= ~(NV_PAUSEFRAME_RX_ENABLE | NV_PAUSEFRAME_TX_ENABLE);
+		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)
+			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		mii_control &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
+		if (reg & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+			mii_control |= BMCR_FULLDPLX;
+		if (reg & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+			mii_control |= BMCR_SPEED100;
+		mii_rw(dev, np->phyaddr, MII_BMCR, mii_control);
+	}
+
 	/* reset the phy */
 	if (phy_reset(dev)) {
 		printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
@@ -851,10 +1236,12 @@ static int phy_init(struct net_device *d
 	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
 
 	/* restart auto negotiation */
-	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
-	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
-		return PHY_ERROR;
+	if (autoneg == AUTONEG_ENABLE) {
+		mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
+		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+			return PHY_ERROR;
+		}
 	}
 
 	return 0;
@@ -960,7 +1347,7 @@ static int nv_alloc_rx(struct net_device
 	while (np->cur_rx != refill_rx) {
 		struct sk_buff *skb;
 
-		nr = refill_rx % RX_RING;
+		nr = refill_rx % np->rx_ring_size;
 		if (np->rx_skbuff[nr] == NULL) {
 
 			skb = dev_alloc_skb(np->rx_buf_sz + NV_RX_ALLOC_PAD);
@@ -989,7 +1376,7 @@ static int nv_alloc_rx(struct net_device
 		refill_rx++;
 	}
 	np->refill_rx = refill_rx;
-	if (np->cur_rx - refill_rx == RX_RING)
+	if (np->cur_rx - refill_rx == np->rx_ring_size)
 		return 1;
 	return 0;
 }
@@ -1027,9 +1414,9 @@ static void nv_init_rx(struct net_device
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
 
-	np->cur_rx = RX_RING;
+	np->cur_rx = np->rx_ring_size;
 	np->refill_rx = 0;
-	for (i = 0; i < RX_RING; i++)
+	for (i = 0; i < np->rx_ring_size; i++)
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 			np->rx_ring.orig[i].FlagLen = 0;
 	        else
@@ -1042,7 +1429,7 @@ static void nv_init_tx(struct net_device
 	int i;
 
 	np->next_tx = np->nic_tx = 0;
-	for (i = 0; i < TX_RING; i++) {
+	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 			np->tx_ring.orig[i].FlagLen = 0;
 	        else
@@ -1087,7 +1474,7 @@ static void nv_drain_tx(struct net_devic
 	struct fe_priv *np = netdev_priv(dev);
 	unsigned int i;
 	
-	for (i = 0; i < TX_RING; i++) {
+	for (i = 0; i < np->tx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 			np->tx_ring.orig[i].FlagLen = 0;
 		else
@@ -1101,7 +1488,7 @@ static void nv_drain_rx(struct net_devic
 {
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
-	for (i = 0; i < RX_RING; i++) {
+	for (i = 0; i < np->rx_ring_size; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 			np->rx_ring.orig[i].FlagLen = 0;
 		else
@@ -1133,8 +1520,8 @@ static int nv_start_xmit(struct sk_buff 
 	u32 tx_flags = 0;
 	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
 	unsigned int fragments = skb_shinfo(skb)->nr_frags;
-	unsigned int nr = (np->next_tx - 1) % TX_RING;
-	unsigned int start_nr = np->next_tx % TX_RING;
+	unsigned int nr = (np->next_tx - 1) % np->tx_ring_size;
+	unsigned int start_nr = np->next_tx % np->tx_ring_size;
 	unsigned int i;
 	u32 offset = 0;
 	u32 bcnt;
@@ -1150,7 +1537,7 @@ static int nv_start_xmit(struct sk_buff 
 
 	spin_lock_irq(&np->lock);
 
-	if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
+	if ((np->next_tx - np->nic_tx + entries - 1) > np->tx_limit_stop) {
 		spin_unlock_irq(&np->lock);
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
@@ -1159,7 +1546,7 @@ static int nv_start_xmit(struct sk_buff 
 	/* setup the header buffer */
 	do {
 		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
-		nr = (nr + 1) % TX_RING;
+		nr = (nr + 1) % np->tx_ring_size;
 
 		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
 						PCI_DMA_TODEVICE);
@@ -1186,7 +1573,7 @@ static int nv_start_xmit(struct sk_buff 
 
 		do {
 			bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
-			nr = (nr + 1) % TX_RING;
+			nr = (nr + 1) % np->tx_ring_size;
 
 			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
 						      PCI_DMA_TODEVICE);
@@ -1268,7 +1655,7 @@ static void nv_tx_done(struct net_device
 	struct sk_buff *skb;
 
 	while (np->nic_tx != np->next_tx) {
-		i = np->nic_tx % TX_RING;
+		i = np->nic_tx % np->tx_ring_size;
 
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 			Flags = le32_to_cpu(np->tx_ring.orig[i].FlagLen);
@@ -1313,7 +1700,7 @@ static void nv_tx_done(struct net_device
 		nv_release_txskb(dev, i);
 		np->nic_tx++;
 	}
-	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
+	if (np->next_tx - np->nic_tx < np->tx_limit_start)
 		netif_wake_queue(dev);
 }
 
@@ -1350,7 +1737,7 @@ static void nv_tx_timeout(struct net_dev
 					readl(base + i + 24), readl(base + i + 28));
 		}
 		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
-		for (i=0;i<TX_RING;i+= 4) {
+		for (i=0;i<np->tx_ring_size;i+= 4) {
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
 				       i, 
@@ -1462,12 +1849,12 @@ static void nv_rx_process(struct net_dev
 	u32 Flags;
 	u32 vlanflags = 0;
 
-	for (; np->cur_rx - np->refill_rx < RX_RING; np->cur_rx++) {
+	for (; np->cur_rx - np->refill_rx < np->rx_ring_size; np->cur_rx++) {
 		struct sk_buff *skb;
 		int len;
 		int i;
 
-		i = np->cur_rx % RX_RING;
+		i = np->cur_rx % np->rx_ring_size;
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 			Flags = le32_to_cpu(np->rx_ring.orig[i].FlagLen);
 			len = nv_descr_getlength(&np->rx_ring.orig[i], np->desc_ver);
@@ -1567,14 +1954,16 @@ static void nv_rx_process(struct net_dev
 					}
 				}
 			}
-			Flags &= NV_RX2_CHECKSUMMASK;
-			if (Flags == NV_RX2_CHECKSUMOK1 ||
-					Flags == NV_RX2_CHECKSUMOK2 ||
-					Flags == NV_RX2_CHECKSUMOK3) {
-				dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
-				np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
-			} else {
-				dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
+			if (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) {
+				Flags &= NV_RX2_CHECKSUMMASK;
+				if (Flags == NV_RX2_CHECKSUMOK1 ||
+				    Flags == NV_RX2_CHECKSUMOK2 ||
+				    Flags == NV_RX2_CHECKSUMOK3) {
+					dprintk(KERN_DEBUG "%s: hw checksum hit!.\n", dev->name);
+					np->rx_skbuff[i]->ip_summed = CHECKSUM_UNNECESSARY;
+				} else {
+					dprintk(KERN_DEBUG "%s: hwchecksum miss!.\n", dev->name);
+				}
 			}
 		}
 		/* got a valid packet - forward it to the network core */
@@ -1639,15 +2028,7 @@ static int nv_change_mtu(struct net_devi
 		 * guessed, there is probably a simpler approach.
 		 * Changing the MTU is a rare event, it shouldn't matter.
 		 */
-		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-		    ((np->msi_flags & NV_MSI_X_ENABLED) && 
-		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-			disable_irq(dev->irq);
-		} else {
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-		}
+		nv_disable_irq(dev);
 		spin_lock_bh(&dev->xmit_lock);
 		spin_lock(&np->lock);
 		/* stop engines */
@@ -1658,18 +2039,15 @@ static int nv_change_mtu(struct net_devi
 		nv_drain_rx(dev);
 		nv_drain_tx(dev);
 		/* reinit driver view of the rx queue */
-		nv_init_rx(dev);
-		nv_init_tx(dev);
-		/* alloc new rx buffers */
 		set_bufsize(dev);
-		if (nv_alloc_rx(dev)) {
+		if (nv_init_ring(dev)) {
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 		}
 		/* reinit nic view of the rx queue */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-		writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -1680,15 +2058,7 @@ static int nv_change_mtu(struct net_devi
 		nv_start_tx(dev);
 		spin_unlock(&np->lock);
 		spin_unlock_bh(&dev->xmit_lock);
-		if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-		    ((np->msi_flags & NV_MSI_X_ENABLED) && 
-		     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-			enable_irq(dev->irq);
-		} else {
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-		}
+		nv_enable_irq(dev);
 	}
 	return 0;
 }
@@ -1751,16 +2121,16 @@ static void nv_set_multicast(struct net_
 	u8 __iomem *base = get_hwbase(dev);
 	u32 addr[2];
 	u32 mask[2];
-	u32 pff;
+	u32 pff = readl(base + NvRegPacketFilterFlags) & NVREG_PFF_PAUSE_RX;
 
 	memset(addr, 0, sizeof(addr));
 	memset(mask, 0, sizeof(mask));
 
 	if (dev->flags & IFF_PROMISC) {
 		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
-		pff = NVREG_PFF_PROMISC;
+		pff |= NVREG_PFF_PROMISC;
 	} else {
-		pff = NVREG_PFF_MYADDR;
+		pff |= NVREG_PFF_MYADDR;
 
 		if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
 			u32 alwaysOff[2];
@@ -1805,6 +2175,35 @@ static void nv_set_multicast(struct net_
 	spin_unlock_irq(&np->lock);
 }
 
+static void nv_update_pause(struct net_device *dev, u32 pause_flags)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
+
+	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
+		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
+		if (pause_flags & NV_PAUSEFRAME_RX_ENABLE) {
+			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
+			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+		} else {
+			writel(pff, base + NvRegPacketFilterFlags);
+		}
+	}
+	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
+		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
+		if (pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
+			writel(NVREG_TX_PAUSEFRAME_ENABLE,  base + NvRegTxPauseFrame);
+			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
+			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+		} else {
+			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
+			writel(regmisc, base + NvRegMisc1);
+		}
+	}
+}
+
 /**
  * nv_update_linkspeed: Setup the MAC according to the link partner
  * @dev: Network device to be configured
@@ -1827,7 +2226,7 @@ static int nv_update_linkspeed(struct ne
 	int newdup = np->duplex;
 	int mii_status;
 	int retval = 0;
-	u32 control_1000, status_1000, phyreg;
+	u32 control_1000, status_1000, phyreg, pause_flags;
 
 	/* BMSR_LSTATUS is latched, read it twice:
 	 * we want the current value.
@@ -1873,6 +2272,10 @@ static int nv_update_linkspeed(struct ne
 		goto set_speed;
 	}
 
+	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
+	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
+				dev->name, adv, lpa);
 	retval = 1;
 	if (np->gigabit == PHY_GIGABIT) {
 		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
@@ -1888,11 +2291,6 @@ static int nv_update_linkspeed(struct ne
 		}
 	}
 
-	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
-	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
-				dev->name, adv, lpa);
-
 	/* FIXME: handle parallel detection properly */
 	adv_lpa = lpa & adv;
 	if (adv_lpa & LPA_100FULL) {
@@ -1951,55 +2349,45 @@ set_speed:
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	pci_push(base);
 
-	/* setup pause frame based on advertisement and link partner */
-	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
-
+	pause_flags = 0;
+	/* setup pause frame */
 	if (np->duplex != 0) {
-		adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
-		lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
-
-		switch (adv_pause) {
-		case (ADVERTISE_PAUSE_CAP):
-			if (lpa_pause & LPA_PAUSE_CAP) {
-				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
-			}
-			break;
-		case (ADVERTISE_PAUSE_ASYM):
-			if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
-			{
-				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
-			}
-			break;
-		case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
-			if (lpa_pause & LPA_PAUSE_CAP)
-			{
-				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
-			}
-			if (lpa_pause == LPA_PAUSE_ASYM)
-			{
-				np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
+			adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
+			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+
+			switch (adv_pause) {
+			case (ADVERTISE_PAUSE_CAP):
+				if (lpa_pause & LPA_PAUSE_CAP) {
+					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+				}
+				break;
+			case (ADVERTISE_PAUSE_ASYM):
+				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
+				{
+					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+				}
+				break;
+			case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+				if (lpa_pause & LPA_PAUSE_CAP)
+				{
+					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
+					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+				}
+				if (lpa_pause == LPA_PAUSE_ASYM)
+				{
+					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+				}
+				break;
 			}
-			break;
-		}
-	}
-
-	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
-		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
-		if (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE)
-			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
-		else
-			writel(pff, base + NvRegPacketFilterFlags);
-	}
-	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
-		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
-		if (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
-			writel(NVREG_TX_PAUSEFRAME_ENABLE,  base + NvRegTxPauseFrame);
-			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
 		} else {
-			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
-			writel(regmisc, base + NvRegMisc1);
+			pause_flags = np->pause_flags;
 		}
 	}
+	nv_update_pause(dev, pause_flags);
 
 	return retval;
 }
@@ -2261,56 +2649,219 @@ static irqreturn_t nv_nic_irq_other(int 
 	return IRQ_RETVAL(i);
 }
 
-static void nv_do_nic_poll(unsigned long data)
+static irqreturn_t nv_nic_irq_test(int foo, void *data, struct pt_regs *regs)
 {
 	struct net_device *dev = (struct net_device *) data;
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	u32 mask = 0;
+	u32 events;
 
-	/*
-	 * First disable irq(s) and then
-	 * reenable interrupts on the nic, we have to do this before calling
-	 * nv_nic_irq because that may decide to do otherwise
-	 */
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
-	    ((np->msi_flags & NV_MSI_X_ENABLED) && 
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		disable_irq(dev->irq);
-		mask = np->irqmask;
+	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
+		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
 	} else {
-		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-			mask |= NVREG_IRQ_RX_ALL;
-		}
-		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
-			mask |= NVREG_IRQ_TX_ALL;
-		}
-		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
-			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
-			mask |= NVREG_IRQ_OTHER;
-		}
+		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
+		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
 	}
-	np->nic_poll_irq = 0;
-
-	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
-	
-	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
+	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
+	if (!(events & NVREG_IRQ_TIMER))
+		return IRQ_RETVAL(0);
 
-	if (!(np->msi_flags & NV_MSI_X_ENABLED) || 
-	    ((np->msi_flags & NV_MSI_X_ENABLED) && 
-	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
-		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
-		enable_irq(dev->irq);
-	} else {
-		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
-			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
-			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
-		}
-		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+	spin_lock(&np->lock);
+	np->intr_test = 1;
+	spin_unlock(&np->lock);
+
+	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
+
+	return IRQ_RETVAL(1);
+}
+
+static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i;
+	u32 msixmap = 0;
+
+	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
+	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
+	 * the remaining 8 interrupts.
+	 */
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> i) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
+
+	msixmap = 0;
+	for (i = 0; i < 8; i++) {
+		if ((irqmask >> (i + 8)) & 0x1) {
+			msixmap |= vector << (i << 2);
+		}
+	}
+	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
+}
+
+static int nv_request_irq(struct net_device *dev, int intr_test)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_CAPABLE) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			np->msi_x_entry[i].entry = i;
+		}
+		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+			np->msi_flags |= NV_MSI_X_ENABLED;
+			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
+				/* Request irq for rx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					return 1;
+				}
+				/* Request irq for tx handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					return 1;
+				}
+				/* Request irq for link and timer handling */
+				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
+					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					return 1;
+				}
+				/* map interrupts to their respective vector */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
+				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
+			} else {
+				/* Request irq for all interrupts */
+				if ((!intr_test &&
+				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+				    (intr_test &&
+				     request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
+					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					pci_disable_msix(np->pci_dev);
+					np->msi_flags &= ~NV_MSI_X_ENABLED;
+					return 1;
+				}
+
+				/* map interrupts to vector 0 */
+				writel(0, base + NvRegMSIXMap0);
+				writel(0, base + NvRegMSIXMap1);
+			}
+		}
+	}
+	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+			np->msi_flags |= NV_MSI_ENABLED;
+			if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+			    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0)) {
+				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				pci_disable_msi(np->pci_dev);
+				np->msi_flags &= ~NV_MSI_ENABLED;
+				return 1;
+			}
+
+			/* map interrupts to vector 0 */
+			writel(0, base + NvRegMSIMap0);
+			writel(0, base + NvRegMSIMap1);
+			/* enable msi vector 0 */
+			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+		}
+	}
+	if (ret != 0) {
+		if ((!intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) ||
+		    (intr_test && request_irq(np->pci_dev->irq, &nv_nic_irq_test, SA_SHIRQ, dev->name, dev) != 0))
+			return 1;
+
+	}
+
+	return 0;
+}
+
+static void nv_free_irq(struct net_device *dev)
+{
+	struct fe_priv *np = get_nvpriv(dev);
+	int i;
+
+	if (np->msi_flags & NV_MSI_X_ENABLED) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+			free_irq(np->msi_x_entry[i].vector, dev);
+		}
+		pci_disable_msix(np->pci_dev);
+		np->msi_flags &= ~NV_MSI_X_ENABLED;
+	} else {
+		free_irq(np->pci_dev->irq, dev);
+		if (np->msi_flags & NV_MSI_ENABLED) {
+			pci_disable_msi(np->pci_dev);
+			np->msi_flags &= ~NV_MSI_ENABLED;
+		}
+	}
+}
+
+static void nv_do_nic_poll(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 mask = 0;
+
+	/*
+	 * First disable irq(s) and then
+	 * reenable interrupts on the nic, we have to do this before calling
+	 * nv_nic_irq because that may decide to do otherwise
+	 */
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		disable_irq(dev->irq);
+		mask = np->irqmask;
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+			mask |= NVREG_IRQ_RX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
+			mask |= NVREG_IRQ_TX_ALL;
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_OTHER) {
+			disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector);
+			mask |= NVREG_IRQ_OTHER;
+		}
+	}
+	np->nic_poll_irq = 0;
+
+	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
+
+	writel(mask, base + NvRegIrqMask);
+	pci_push(base);
+
+	if (!(np->msi_flags & NV_MSI_X_ENABLED) ||
+	    ((np->msi_flags & NV_MSI_X_ENABLED) &&
+	     ((np->msi_flags & NV_MSI_X_VECTORS_MASK) == 0x1))) {
+		nv_nic_irq((int) 0, (void *) data, (struct pt_regs *) NULL);
+		enable_irq(dev->irq);
+	} else {
+		if (np->nic_poll_irq & NVREG_IRQ_RX_ALL) {
+			nv_nic_irq_rx((int) 0, (void *) data, (struct pt_regs *) NULL);
+			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
+		}
+		if (np->nic_poll_irq & NVREG_IRQ_TX_ALL) {
 			nv_nic_irq_tx((int) 0, (void *) data, (struct pt_regs *) NULL);
 			enable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector);
 		}
@@ -2328,6 +2879,63 @@ static void nv_poll_controller(struct ne
 }
 #endif
 
+static void nv_do_stats_poll(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *) data;
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+
+	spin_lock_irq(&np->lock);
+
+	np->estats.tx_dropped = np->stats.tx_dropped;
+	np->estats.rx_errors_total = np->stats.rx_errors;
+	np->estats.rx_packets = np->stats.rx_packets;
+	if (np->driver_data & DEV_HAS_STATISTICS) {
+		np->estats.tx_packets += readl(base + NvRegTxFrame);
+		np->estats.tx_fifo_errors += readl(base + NvRegTxUnderflow);
+		np->estats.tx_carrier_errors += readl(base + NvRegTxLossCarrier);
+		np->estats.tx_bytes += readl(base + NvRegTxCnt);
+		np->estats.rx_bytes += readl(base + NvRegRxCnt);
+		np->estats.rx_crc_errors += readl(base + NvRegRxFCSErr);
+		np->estats.rx_over_errors += readl(base + NvRegRxOverflow);
+	} else {
+		np->estats.tx_packets = np->stats.tx_packets;
+		np->estats.tx_fifo_errors = np->stats.tx_fifo_errors;
+		np->estats.tx_carrier_errors = np->stats.tx_carrier_errors;
+		np->estats.tx_bytes = np->stats.tx_bytes;
+		np->estats.rx_bytes = np->stats.rx_bytes;
+		np->estats.rx_crc_errors = np->stats.rx_crc_errors;
+		np->estats.rx_over_errors = np->stats.rx_over_errors;
+	}
+
+	if (np->driver_data & DEV_HAS_STATISTICS) {
+		np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
+		np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
+		np->estats.tx_many_rexmt += readl(base + NvRegTxManyReXmt);
+		np->estats.tx_late_collision += readl(base + NvRegTxLateCol);
+		np->estats.tx_excess_deferral += readl(base + NvRegTxExcessDef);
+		np->estats.tx_retry_error += readl(base + NvRegTxRetryErr);
+		np->estats.rx_frame_error += readl(base + NvRegRxFrameErr);
+		np->estats.rx_extra_byte += readl(base + NvRegRxExtraByte);
+		np->estats.rx_late_collision += readl(base + NvRegRxLateCol);
+		np->estats.rx_runt += readl(base + NvRegRxRunt);
+		np->estats.rx_frame_too_long += readl(base + NvRegRxFrameTooLong);
+		np->estats.rx_frame_align_error += readl(base + NvRegRxFrameAlignErr);
+		np->estats.rx_length_error += readl(base + NvRegRxLenErr);
+		np->estats.rx_unicast += readl(base + NvRegRxUnicast);
+		np->estats.rx_multicast += readl(base + NvRegRxMulticast);
+		np->estats.rx_broadcast += readl(base + NvRegRxBroadcast);
+		np->estats.tx_deferral += readl(base + NvRegTxDef);
+		np->estats.tx_pause += readl(base + NvRegTxPause);
+		np->estats.rx_pause += readl(base + NvRegRxPause);
+		np->estats.rx_drop_frame += readl(base + NvRegRxDropFrame);
+	}
+
+	if (!np->in_shutdown)
+		mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+	spin_unlock_irq(&np->lock);
+}
+
 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2351,17 +2959,19 @@ static int nv_set_wol(struct net_device 
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
+	u32 flags = 0;
 
-	spin_lock_irq(&np->lock);
 	if (wolinfo->wolopts == 0) {
-		writel(0, base + NvRegWakeUpFlags);
 		np->wolenabled = 0;
-	}
-	if (wolinfo->wolopts & WAKE_MAGIC) {
-		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
+	} else if (wolinfo->wolopts & WAKE_MAGIC) {
 		np->wolenabled = 1;
+		flags = NVREG_WAKEUPFLAGS_ENABLE;
+	}
+	if (netif_running(dev)) {
+		spin_lock_irq(&np->lock);
+		writel(flags, base + NvRegWakeUpFlags);
+		spin_unlock_irq(&np->lock);
 	}
-	spin_unlock_irq(&np->lock);
 	return 0;
 }
 
@@ -2375,9 +2985,17 @@ static int nv_get_settings(struct net_de
 	if (!netif_running(dev)) {
 		/* We do not track link speed / duplex setting if the
 		 * interface is disabled. Force a link check */
-		nv_update_linkspeed(dev);
+		if (nv_update_linkspeed(dev)) {
+			if (!netif_carrier_ok(dev))
+				netif_carrier_on(dev);
+		} else {
+			if (netif_carrier_ok(dev))
+				netif_carrier_off(dev);
+		}
 	}
-	switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+
+	if (netif_carrier_ok(dev)) {
+		switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
 		case NVREG_LINKSPEED_10:
 			ecmd->speed = SPEED_10;
 			break;
@@ -2387,10 +3005,14 @@ static int nv_get_settings(struct net_de
 		case NVREG_LINKSPEED_1000:
 			ecmd->speed = SPEED_1000;
 			break;
+		}
+		ecmd->duplex = DUPLEX_HALF;
+		if (np->duplex)
+			ecmd->duplex = DUPLEX_FULL;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
 	}
-	ecmd->duplex = DUPLEX_HALF;
-	if (np->duplex)
-		ecmd->duplex = DUPLEX_FULL;
 
 	ecmd->autoneg = np->autoneg;
 
@@ -2398,23 +3020,20 @@ static int nv_get_settings(struct net_de
 	if (np->autoneg) {
 		ecmd->advertising |= ADVERTISED_Autoneg;
 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-	} else {
-		adv = np->fixed_mode;
-	}
-	if (adv & ADVERTISE_10HALF)
-		ecmd->advertising |= ADVERTISED_10baseT_Half;
-	if (adv & ADVERTISE_10FULL)
-		ecmd->advertising |= ADVERTISED_10baseT_Full;
-	if (adv & ADVERTISE_100HALF)
-		ecmd->advertising |= ADVERTISED_100baseT_Half;
-	if (adv & ADVERTISE_100FULL)
-		ecmd->advertising |= ADVERTISED_100baseT_Full;
-	if (np->autoneg && np->gigabit == PHY_GIGABIT) {
-		adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
-		if (adv & ADVERTISE_1000FULL)
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		if (adv & ADVERTISE_10HALF)
+			ecmd->advertising |= ADVERTISED_10baseT_Half;
+		if (adv & ADVERTISE_10FULL)
+			ecmd->advertising |= ADVERTISED_10baseT_Full;
+		if (adv & ADVERTISE_100HALF)
+			ecmd->advertising |= ADVERTISED_100baseT_Half;
+		if (adv & ADVERTISE_100FULL)
+			ecmd->advertising |= ADVERTISED_100baseT_Full;
+		if (np->gigabit == PHY_GIGABIT) {
+			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+			if (adv & ADVERTISE_1000FULL)
+				ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		}
 	}
-
 	ecmd->supported = (SUPPORTED_Autoneg |
 		SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
@@ -2466,7 +3085,18 @@ static int nv_set_settings(struct net_de
 		return -EINVAL;
 	}
 
-	spin_lock_irq(&np->lock);
+	netif_carrier_off(dev);
+	if (netif_running(dev)) {
+		nv_disable_irq(dev);
+		spin_lock_bh(&dev->xmit_lock);
+		spin_lock(&np->lock);
+		/* stop engines */
+		nv_stop_rx(dev);
+		nv_stop_tx(dev);
+		spin_unlock(&np->lock);
+		spin_unlock_bh(&dev->xmit_lock);
+	}
+
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
 		int adv, bmcr;
 
@@ -2478,110 +3108,799 @@ static int nv_set_settings(struct net_de
 		if (ecmd->advertising & ADVERTISED_10baseT_Half)
 			adv |= ADVERTISE_10HALF;
 		if (ecmd->advertising & ADVERTISED_10baseT_Full)
-			adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+			adv |= ADVERTISE_10FULL;
 		if (ecmd->advertising & ADVERTISED_100baseT_Half)
 			adv |= ADVERTISE_100HALF;
 		if (ecmd->advertising & ADVERTISED_100baseT_Full)
-			adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+			adv |= ADVERTISE_100FULL;
+		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ)  /* for rx we set both advertisements but disable tx pause */
+			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+			adv |=  ADVERTISE_PAUSE_ASYM;
+		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+
+		if (np->gigabit == PHY_GIGABIT) {
+			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+			adv &= ~ADVERTISE_1000FULL;
+			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
+				adv |= ADVERTISE_1000FULL;
+			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
+		}
+
+		if (netif_running(dev))
+			printk(KERN_INFO "%s: link down.\n", dev->name);
+		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+
+	} else {
+		int adv, bmcr;
+
+		np->autoneg = 0;
+
+		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
+			adv |= ADVERTISE_10HALF;
+		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
+			adv |= ADVERTISE_10FULL;
+		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
+			adv |= ADVERTISE_100HALF;
+		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
+			adv |= ADVERTISE_100FULL;
+		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) { /* for rx we set both advertisements but disable tx pause */
+			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+		}
+		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) {
+			adv |=  ADVERTISE_PAUSE_ASYM;
+			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+		}
 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+		np->fixed_mode = adv;
+
+		if (np->gigabit == PHY_GIGABIT) {
+			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+			adv &= ~ADVERTISE_1000FULL;
+			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
+		}
+
+		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		bmcr &= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_SPEED1000|BMCR_FULLDPLX);
+		if (np->fixed_mode & (ADVERTISE_10FULL|ADVERTISE_100FULL))
+			bmcr |= BMCR_FULLDPLX;
+		if (np->fixed_mode & (ADVERTISE_100HALF|ADVERTISE_100FULL))
+			bmcr |= BMCR_SPEED100;
+		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+		if (np->phy_oui == PHY_OUI_MARVELL) {
+			/* reset the phy */
+			if (phy_reset(dev)) {
+				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+				return -EINVAL;
+			}
+		} else if (netif_running(dev)) {
+			/* Wait a bit and then reconfigure the nic. */
+			udelay(10);
+			nv_linkchange(dev);
+		}
+	}
+
+	if (netif_running(dev)) {
+		nv_start_rx(dev);
+		nv_start_tx(dev);
+		nv_enable_irq(dev);
+	}
+
+	return 0;
+}
+
+#define FORCEDETH_REGS_VER	1
+#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */
+
+static int nv_get_regs_len(struct net_device *dev)
+{
+	return FORCEDETH_REGS_SIZE;
+}
+
+static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u32 *rbuf = buf;
+	int i;
+
+	regs->version = FORCEDETH_REGS_VER;
+	spin_lock_irq(&np->lock);
+	for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
+		rbuf[i] = readl(base + i*sizeof(u32));
+	spin_unlock_irq(&np->lock);
+}
+
+static int nv_nway_reset(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	int ret;
+
+	if (np->autoneg) {
+		int bmcr;
+
+		netif_carrier_off(dev);
+		if (netif_running(dev)) {
+			nv_disable_irq(dev);
+			spin_lock_bh(&dev->xmit_lock);
+			spin_lock(&np->lock);
+			/* stop engines */
+			nv_stop_rx(dev);
+			nv_stop_tx(dev);
+			spin_unlock(&np->lock);
+			spin_unlock_bh(&dev->xmit_lock);
+			printk(KERN_INFO "%s: link down.\n", dev->name);
+		}
+
+		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+
+		if (netif_running(dev)) {
+			nv_start_rx(dev);
+			nv_start_tx(dev);
+			nv_enable_irq(dev);
+		}
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static void nv_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	ring->rx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->tx_max_pending = (np->desc_ver == DESC_VER_1) ? RING_MAX_DESC_VER_1 : RING_MAX_DESC_VER_2_3;
+
+	ring->rx_pending = np->rx_ring_size;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+	ring->tx_pending = np->tx_ring_size;
+}
+
+static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	u8 *rxtx_ring, *rx_skbuff, *tx_skbuff, *rx_dma, *tx_dma, *tx_dma_len;
+	dma_addr_t ring_addr;
+
+	if (ring->rx_pending < RX_RING_MIN ||
+	    ring->tx_pending < TX_RING_MIN ||
+	    ring->rx_mini_pending != 0 ||
+	    ring->rx_jumbo_pending != 0 ||
+	    (np->desc_ver == DESC_VER_1 &&
+	     (ring->rx_pending > RING_MAX_DESC_VER_1 ||
+	      ring->tx_pending > RING_MAX_DESC_VER_1)) ||
+	    (np->desc_ver != DESC_VER_1 &&
+	     (ring->rx_pending > RING_MAX_DESC_VER_2_3 ||
+	      ring->tx_pending > RING_MAX_DESC_VER_2_3))) {
+		return -EINVAL;
+	}
+
+	/* allocate new rings */
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		rxtx_ring = pci_alloc_consistent(np->pci_dev,
+					    sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+					    &ring_addr);
+	} else {
+		rxtx_ring = pci_alloc_consistent(np->pci_dev,
+					    sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+					    &ring_addr);
+	}
+	rx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->rx_pending, GFP_KERNEL);
+	rx_dma = kmalloc(sizeof(dma_addr_t) * ring->rx_pending, GFP_KERNEL);
+	tx_skbuff = kmalloc(sizeof(struct sk_buff*) * ring->tx_pending, GFP_KERNEL);
+	tx_dma = kmalloc(sizeof(dma_addr_t) * ring->tx_pending, GFP_KERNEL);
+	tx_dma_len = kmalloc(sizeof(unsigned int) * ring->tx_pending, GFP_KERNEL);
+	if (!rxtx_ring || !rx_skbuff || !rx_dma || !tx_skbuff || !tx_dma || !tx_dma_len) {
+		/* fall back to old rings */
+		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+			if(rxtx_ring)
+				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
+						    rxtx_ring, ring_addr);
+		} else {
+			if (rxtx_ring)
+				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
+						    rxtx_ring, ring_addr);
+		}
+		if (rx_skbuff)
+			kfree(rx_skbuff);
+		if (rx_dma)
+			kfree(rx_dma);
+		if (tx_skbuff)
+			kfree(tx_skbuff);
+		if (tx_dma)
+			kfree(tx_dma);
+		if (tx_dma_len)
+			kfree(tx_dma_len);
+		goto exit;
+	}
+
+	if (netif_running(dev)) {
+		nv_disable_irq(dev);
+		spin_lock_bh(&dev->xmit_lock);
+		spin_lock(&np->lock);
+		/* stop engines */
+		nv_stop_rx(dev);
+		nv_stop_tx(dev);
+		nv_txrx_reset(dev);
+		/* drain queues */
+		nv_drain_rx(dev);
+		nv_drain_tx(dev);
+		/* delete queues */
+		free_rings(dev);
+	}
+
+	/* set new values */
+	np->rx_ring_size = ring->rx_pending;
+	np->tx_ring_size = ring->tx_pending;
+	np->tx_limit_stop = np->tx_ring_size - TX_LIMIT_DIFFERENCE;
+	np->tx_limit_start = np->tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
+	} else {
+		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
+	}
+	np->rx_skbuff = (struct sk_buff**)rx_skbuff;
+	np->rx_dma = (dma_addr_t*)rx_dma;
+	np->tx_skbuff = (struct sk_buff**)tx_skbuff;
+	np->tx_dma = (dma_addr_t*)tx_dma;
+	np->tx_dma_len = (unsigned int*)tx_dma_len;
+	np->ring_addr = ring_addr;
+
+	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
+
+	if (netif_running(dev)) {
+		/* reinit driver view of the queues */
+		set_bufsize(dev);
+		if (nv_init_ring(dev)) {
+			if (!np->in_shutdown)
+				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+		}
+
+		/* reinit nic view of the queues */
+		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
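+		/* ring sizes are programmed into NvRegRingSizes as (count - 1) */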
+		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+			base + NvRegRingSizes);
+		pci_push(base);
+		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+		pci_push(base);
+
+		/* restart engines */
+		nv_start_rx(dev);
+		nv_start_tx(dev);
+		spin_unlock(&np->lock);
+		spin_unlock_bh(&dev->xmit_lock);
+		nv_enable_irq(dev);
+	}
+	return 0;
+exit:
+	return -ENOMEM;
+}
+
+static void nv_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	pause->autoneg = (np->pause_flags & NV_PAUSEFRAME_AUTONEG) != 0;
+	pause->rx_pause = (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE) != 0;
+	pause->tx_pause = (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) != 0;
+}
+
+static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam* pause)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	int adv, bmcr;
+
+	if ((!np->autoneg && np->duplex == 0) ||
+	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
+		printk(KERN_INFO "%s: cannot set pause parameters when forced link is in half duplex.\n",
+		       dev->name);
+		return -EINVAL;
+	}
+	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
+		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
+		return -EINVAL;
+	}
+
+	netif_carrier_off(dev);
+	if (netif_running(dev)) {
+		nv_disable_irq(dev);
+		spin_lock_bh(&dev->xmit_lock);
+		spin_lock(&np->lock);
+		/* stop engines */
+		nv_stop_rx(dev);
+		nv_stop_tx(dev);
+		spin_unlock(&np->lock);
+		spin_unlock_bh(&dev->xmit_lock);
+	}
+
+	np->pause_flags &= ~(NV_PAUSEFRAME_RX_REQ|NV_PAUSEFRAME_TX_REQ);
+	if (pause->rx_pause)
+		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
+	if (pause->tx_pause)
+		np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
+
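+	/* with autoneg active the pause modes must be renegotiated via the
+	 * phy advertisement; otherwise they are programmed directly */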
+	if (np->autoneg && pause->autoneg) {
+		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
+
+		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
+		adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+		if (np->pause_flags & NV_PAUSEFRAME_RX_REQ) /* for rx we set both advertisements but disable tx pause */
+			adv |=  ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+		if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
+			adv |=  ADVERTISE_PAUSE_ASYM;
+		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
+
+		if (netif_running(dev))
+			printk(KERN_INFO "%s: link down.\n", dev->name);
+		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+	} else {
+		np->pause_flags &= ~(NV_PAUSEFRAME_AUTONEG|NV_PAUSEFRAME_RX_ENABLE|NV_PAUSEFRAME_TX_ENABLE);
+		if (pause->rx_pause)
+			np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+		if (pause->tx_pause)
+			np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+
+		if (!netif_running(dev))
+			nv_update_linkspeed(dev);
+		else
+			nv_update_pause(dev, np->pause_flags);
+	}
+
+	if (netif_running(dev)) {
+		nv_start_rx(dev);
+		nv_start_tx(dev);
+		nv_enable_irq(dev);
+	}
+	return 0;
+}
+
+static u32 nv_get_rx_csum(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	return (np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) != 0;
+}
+
+static int nv_set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int retcode = 0;
+
+	if (np->driver_data & DEV_HAS_CHECKSUM) {
+
+		if (((np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && data) ||
+		    (!(np->txrxctl_bits & NVREG_TXRXCTL_RXCHECK) && !data)) {
+			/* already set or unset */
+			return 0;
+		}
+
+		if (data) {
+			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+		} else if (!(np->vlanctl_bits & NVREG_VLANCONTROL_ENABLE)) {
+			np->txrxctl_bits &= ~NVREG_TXRXCTL_RXCHECK;
+		} else {
+			printk(KERN_INFO "Cannot disable rx checksum if vlan is enabled\n");
+			return -EINVAL;
+		}
+
+		if (netif_running(dev)) {
+			spin_lock_irq(&np->lock);
+			writel(np->txrxctl_bits, base + NvRegTxRxControl);
+			spin_unlock_irq(&np->lock);
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return retcode;
+}
+
+static int nv_set_tx_csum(struct net_device *dev, u32 data)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (np->driver_data & DEV_HAS_CHECKSUM)
+		return ethtool_op_set_tx_hw_csum(dev, data);
+	else
+		return -EINVAL;
+}
+
+static int nv_set_sg(struct net_device *dev, u32 data)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (np->driver_data & DEV_HAS_CHECKSUM)
+		return ethtool_op_set_sg(dev, data);
+	else
+		return -EINVAL;
+}
+
+#ifdef NETIF_F_TSO
+static int nv_set_tso(struct net_device *dev, u32 data)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (np->driver_data & DEV_HAS_CHECKSUM)
+		return ethtool_op_set_tso(dev, data);
+	else
+		return -EINVAL;
+}
+#endif
+
+static int nv_get_stats_count(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (np->driver_data & DEV_HAS_STATISTICS)
+		return (sizeof(struct nv_ethtool_stats)/sizeof(u64));
+	else
+		return NV_STATS_COUNT_SW;
+}
+
+static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	/* update stats */
+	nv_do_stats_poll((unsigned long)dev);
+
+	memcpy(buffer, &np->estats, nv_get_stats_count(dev)*sizeof(u64));
+}
+
+static int nv_self_test_count(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if (np->driver_data & DEV_HAS_TEST_EXTENDED)
+		return NV_TEST_COUNT_EXTENDED;
+	else
+		return NV_TEST_COUNT_BASE;
+}
+
+static int nv_link_test(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	int mii_status;
+
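+	/* BMSR latches link failures; read twice to get the current status */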
+	mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+
+	/* check phy link status */
+	if (!(mii_status & BMSR_LSTATUS))
+		return 0;
+	else
+		return 1;
+}
+
+static int nv_register_test(struct net_device *dev)
+{
+	u8 __iomem *base = get_hwbase(dev);
+	int i = 0;
+	u32 orig_read, new_read;
+
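+	/* walk the nv_registers_test table (terminated by a zero reg entry):
+	 * toggle the maskable bits of each register and verify that the new
+	 * value reads back as written */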
+	do {
+		orig_read = readl(base + nv_registers_test[i].reg);
+
+		/* xor with mask to toggle bits */
+		orig_read ^= nv_registers_test[i].mask;
+
+		writel(orig_read, base + nv_registers_test[i].reg);
+
+		new_read = readl(base + nv_registers_test[i].reg);
+
+		if ((new_read & nv_registers_test[i].mask) != (orig_read & nv_registers_test[i].mask))
+			return 0;
+
+		/* restore original value */
+		orig_read ^= nv_registers_test[i].mask;
+		writel(orig_read, base + nv_registers_test[i].reg);
+
+	} while (nv_registers_test[++i].reg != 0);
+
+	return 1;
+}
+
+static int nv_interrupt_test(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	int ret = 1;
+	int testcnt;
+	u32 save_msi_flags, save_poll_interval = 0;
+
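+	/* strategy: switch to a single test vector, arm the timer interrupt
+	 * and check that the ISR sets np->intr_test */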
+	if (netif_running(dev)) {
+		/* free current irq */
+		nv_free_irq(dev);
+		save_poll_interval = readl(base+NvRegPollingInterval);
+	}
+
+	/* flag to test interrupt handler */
+	np->intr_test = 0;
+
+	/* setup test irq */
+	save_msi_flags = np->msi_flags;
+	np->msi_flags &= ~NV_MSI_X_VECTORS_MASK;
+	np->msi_flags |= 0x001; /* setup 1 vector */
+	if (nv_request_irq(dev, 1)) {
+		np->msi_flags = save_msi_flags;
+		return 0;
+	}
+
+	/* setup timer interrupt */
+	writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
+	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+
+	nv_enable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+
+	/* wait for at least one interrupt */
+	msleep(100);
+
+	spin_lock_irq(&np->lock);
+
+	/* flag should be set within ISR */
+	testcnt = np->intr_test;
+	if (!testcnt)
+		ret = 2;
+
+	nv_disable_hw_interrupts(dev, NVREG_IRQ_TIMER);
+	if (!(np->msi_flags & NV_MSI_X_ENABLED))
+		writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+	else
+		writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+
+	spin_unlock_irq(&np->lock);
+
+	nv_free_irq(dev);
+
+	np->msi_flags = save_msi_flags;
+
+	if (netif_running(dev)) {
+		writel(save_poll_interval, base + NvRegPollingInterval);
+		writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
+		/* restore original irq */
+		if (nv_request_irq(dev, 0))
+			return 0;
+	}
+
+	return ret;
+}
+
+static int nv_loopback_test(struct net_device *dev)
+{
+	struct fe_priv *np = netdev_priv(dev);
+	u8 __iomem *base = get_hwbase(dev);
+	struct sk_buff *tx_skb, *rx_skb;
+	dma_addr_t test_dma_addr;
+	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
+	u32 Flags;
+	int len, i, pkt_len;
+	u8 *pkt_data;
+	u32 filter_flags = 0;
+	u32 misc1_flags = 0;
+	int ret = 1;
+
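+	/* send one known-pattern packet with the mac in loopback mode and
+	 * verify that it arrives intact on the rx ring */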
+	if (netif_running(dev)) {
+		nv_disable_irq(dev);
+		filter_flags = readl(base + NvRegPacketFilterFlags);
+		misc1_flags = readl(base + NvRegMisc1);
+	} else {
+		nv_txrx_reset(dev);
+	}
+
+	/* reinit driver view of the queues */
+	set_bufsize(dev);
+	nv_init_ring(dev);
+
+	/* setup hardware for loopback */
+	writel(NVREG_MISC1_FORCE, base + NvRegMisc1);
+	writel(NVREG_PFF_ALWAYS | NVREG_PFF_LOOPBACK, base + NvRegPacketFilterFlags);
+
+	/* reinit nic view of the queues */
+	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+		base + NvRegRingSizes);
+	pci_push(base);
 
-		if (np->gigabit == PHY_GIGABIT) {
-			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
-			adv &= ~ADVERTISE_1000FULL;
-			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
-				adv |= ADVERTISE_1000FULL;
-			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
-		}
+	/* restart engines */
+	nv_start_rx(dev);
+	nv_start_tx(dev);
 
-		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+	/* setup packet for tx */
+	pkt_len = ETH_DATA_LEN;
+	tx_skb = dev_alloc_skb(pkt_len);
+	pkt_data = skb_put(tx_skb, pkt_len);
+	for (i = 0; i < pkt_len; i++)
+		pkt_data[i] = (u8)(i & 0xff);
+	test_dma_addr = pci_map_single(np->pci_dev, tx_skb->data,
+				       tx_skb->end-tx_skb->data, PCI_DMA_TODEVICE);
 
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		np->tx_ring.orig[0].PacketBuffer = cpu_to_le32(test_dma_addr);
+		np->tx_ring.orig[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
 	} else {
-		int adv, bmcr;
+		np->tx_ring.ex[0].PacketBufferHigh = cpu_to_le64(test_dma_addr) >> 32;
+		np->tx_ring.ex[0].PacketBufferLow = cpu_to_le64(test_dma_addr) & 0x0FFFFFFFF;
+		np->tx_ring.ex[0].FlagLen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
+	}
+	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+	pci_push(get_hwbase(dev));
 
-		np->autoneg = 0;
+	msleep(500);
 
-		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
-		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
-			adv |= ADVERTISE_10HALF;
-		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
-			adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
-			adv |= ADVERTISE_100HALF;
-		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
-			adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
-		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
-		np->fixed_mode = adv;
+	/* check for rx of the packet */
+	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
+		Flags = le32_to_cpu(np->rx_ring.orig[0].FlagLen);
+		len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
 
-		if (np->gigabit == PHY_GIGABIT) {
-			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
-			adv &= ~ADVERTISE_1000FULL;
-			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
-		}
+	} else {
+		Flags = le32_to_cpu(np->rx_ring.ex[0].FlagLen);
+		len = nv_descr_getlength_ex(&np->rx_ring.ex[0], np->desc_ver);
+	}
 
-		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-		bmcr |= ~(BMCR_ANENABLE|BMCR_SPEED100|BMCR_FULLDPLX);
-		if (adv & (ADVERTISE_10FULL|ADVERTISE_100FULL))
-			bmcr |= BMCR_FULLDPLX;
-		if (adv & (ADVERTISE_100HALF|ADVERTISE_100FULL))
-			bmcr |= BMCR_SPEED100;
-		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+	if (Flags & NV_RX_AVAIL) {
+		ret = 0;
+	} else if (np->desc_ver == DESC_VER_1) {
+		if (Flags & NV_RX_ERROR)
+			ret = 0;
+	} else {
+		if (Flags & NV_RX2_ERROR) {
+			ret = 0;
+		}
+	}
 
-		if (netif_running(dev)) {
-			/* Wait a bit and then reconfigure the nic. */
-			udelay(10);
-			nv_linkchange(dev);
+	if (ret) {
+		if (len != pkt_len) {
+			ret = 0;
+			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
+				dev->name, len, pkt_len);
+		} else {
+			rx_skb = np->rx_skbuff[0];
+			for (i = 0; i < pkt_len; i++) {
+				if (rx_skb->data[i] != (u8)(i & 0xff)) {
+					ret = 0;
+					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
+						dev->name, i);
+					break;
+				}
+			}
 		}
+	} else {
+		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
 	}
-	spin_unlock_irq(&np->lock);
 
-	return 0;
-}
+	pci_unmap_single(np->pci_dev, test_dma_addr,
+			 tx_skb->end-tx_skb->data,
+			 PCI_DMA_TODEVICE);
+	dev_kfree_skb_any(tx_skb);
 
-#define FORCEDETH_REGS_VER	1
-#define FORCEDETH_REGS_SIZE	0x400 /* 256 32-bit registers */
+	/* stop engines */
+	nv_stop_rx(dev);
+	nv_stop_tx(dev);
+	nv_txrx_reset(dev);
+	/* drain queues */
+	nv_drain_rx(dev);
+	nv_drain_tx(dev);
 
-static int nv_get_regs_len(struct net_device *dev)
-{
-	return FORCEDETH_REGS_SIZE;
+	if (netif_running(dev)) {
+		writel(misc1_flags, base + NvRegMisc1);
+		writel(filter_flags, base + NvRegPacketFilterFlags);
+		nv_enable_irq(dev);
+	}
+
+	return ret;
 }
 
-static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
+static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64 *buffer)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	u32 *rbuf = buf;
-	int i;
+	int result;
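+	/* one u64 result slot per test: [0] link, [1] register,
+	 * [2] interrupt, [3] loopback; a slot is set to 1 on failure */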
+	memset(buffer, 0, nv_self_test_count(dev)*sizeof(u64));
 
-	regs->version = FORCEDETH_REGS_VER;
-	spin_lock_irq(&np->lock);
-	for (i=0;i<FORCEDETH_REGS_SIZE/sizeof(u32);i++)
-		rbuf[i] = readl(base + i*sizeof(u32));
-	spin_unlock_irq(&np->lock);
-}
+	if (!nv_link_test(dev)) {
+		test->flags |= ETH_TEST_FL_FAILED;
+		buffer[0] = 1;
+	}
 
-static int nv_nway_reset(struct net_device *dev)
-{
-	struct fe_priv *np = netdev_priv(dev);
-	int ret;
+	if (test->flags & ETH_TEST_FL_OFFLINE) {
+		if (netif_running(dev)) {
+			netif_stop_queue(dev);
+			spin_lock_bh(&dev->xmit_lock);
+			spin_lock_irq(&np->lock);
+			nv_disable_hw_interrupts(dev, np->irqmask);
+			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+			} else {
+				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
+			}
+			/* stop engines */
+			nv_stop_rx(dev);
+			nv_stop_tx(dev);
+			nv_txrx_reset(dev);
+			/* drain queues */
+			nv_drain_rx(dev);
+			nv_drain_tx(dev);
+			spin_unlock_irq(&np->lock);
+			spin_unlock_bh(&dev->xmit_lock);
+		}
 
-	spin_lock_irq(&np->lock);
-	if (np->autoneg) {
-		int bmcr;
+		if (!nv_register_test(dev)) {
+			test->flags |= ETH_TEST_FL_FAILED;
+			buffer[1] = 1;
+		}
 
-		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
-		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
-		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
+		result = nv_interrupt_test(dev);
+		if (result != 1) {
+			test->flags |= ETH_TEST_FL_FAILED;
+			buffer[2] = 1;
+		}
+		if (result == 0) {
+			/* bail out */
+			return;
+		}
 
-		ret = 0;
-	} else {
-		ret = -EINVAL;
+		if (!nv_loopback_test(dev)) {
+			test->flags |= ETH_TEST_FL_FAILED;
+			buffer[3] = 1;
+		}
+
+		if (netif_running(dev)) {
+			/* reinit driver view of the queues */
+			set_bufsize(dev);
+			if (nv_init_ring(dev)) {
+				if (!np->in_shutdown)
+					mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+			}
+			/* reinit nic view of the queues */
+			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
+			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
+			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+				base + NvRegRingSizes);
+			pci_push(base);
+			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
+			pci_push(base);
+			/* restart engines */
+			nv_start_rx(dev);
+			nv_start_tx(dev);
+			netif_start_queue(dev);
+			nv_enable_hw_interrupts(dev, np->irqmask);
+		}
 	}
-	spin_unlock_irq(&np->lock);
+}
 
-	return ret;
+static void nv_get_strings(struct net_device *dev, u32 stringset, u8 *buffer)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buffer, &nv_estats_str, nv_get_stats_count(dev)*sizeof(struct nv_ethtool_str));
+		break;
+	case ETH_SS_TEST:
+		memcpy(buffer, &nv_etests_str, nv_self_test_count(dev)*sizeof(struct nv_ethtool_str));
+		break;
+	}
 }
 
 static struct ethtool_ops ops = {
@@ -2595,6 +3914,25 @@ static struct ethtool_ops ops = {
 	.get_regs = nv_get_regs,
 	.nway_reset = nv_nway_reset,
 	.get_perm_addr = ethtool_op_get_perm_addr,
+	.get_ringparam = nv_get_ringparam,
+	.set_ringparam = nv_set_ringparam,
+	.get_pauseparam = nv_get_pauseparam,
+	.set_pauseparam = nv_set_pauseparam,
+	.get_rx_csum = nv_get_rx_csum,
+	.set_rx_csum = nv_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = nv_set_tx_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = nv_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = nv_set_tso,
+#endif
+	.get_strings = nv_get_strings,
+	.get_stats_count = nv_get_stats_count,
+	.get_ethtool_stats = nv_get_ethtool_stats,
+	.self_test_count = nv_self_test_count,
+	.self_test = nv_self_test,
 };
 
 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
@@ -2625,32 +3963,6 @@ static void nv_vlan_rx_kill_vid(struct n
 	/* nothing to do */
 };
 
-static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
-{
-	u8 __iomem *base = get_hwbase(dev);
-	int i;
-	u32 msixmap = 0;
-
-	/* Each interrupt bit can be mapped to a MSIX vector (4 bits).
-	 * MSIXMap0 represents the first 8 interrupts and MSIXMap1 represents
-	 * the remaining 8 interrupts.
-	 */
-	for (i = 0; i < 8; i++) {
-		if ((irqmask >> i) & 0x1) {
-			msixmap |= vector << (i << 2);
-		}
-	}
-	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
-
-	msixmap = 0;
-	for (i = 0; i < 8; i++) {
-		if ((irqmask >> (i + 8)) & 0x1) {
-			msixmap |= vector << (i << 2);
-		}
-	}
-	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
-}
-
 static int nv_open(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
@@ -2692,7 +4004,7 @@ static int nv_open(struct net_device *de
 
 	/* 4) give hw rings */
 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-	writel( ((RX_RING-1) << NVREG_RINGSZ_RXSHIFT) + ((TX_RING-1) << NVREG_RINGSZ_TXSHIFT),
+	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
 	/* 5) continue setup */
@@ -2734,7 +4046,8 @@ static int nv_open(struct net_device *de
 			base + NvRegAdapterControl);
 	writel(NVREG_MIISPEED_BIT8|NVREG_MIIDELAY, base + NvRegMIISpeed);
 	writel(NVREG_UNKSETUP4_VAL, base + NvRegUnknownSetupReg4);
-	writel(NVREG_WAKEUPFLAGS_VAL, base + NvRegWakeUpFlags);
+	if (np->wolenabled)
+		writel(NVREG_WAKEUPFLAGS_ENABLE, base + NvRegWakeUpFlags);
 
 	i = readl(base + NvRegPowerState);
 	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
@@ -2744,86 +4057,18 @@ static int nv_open(struct net_device *de
 	udelay(10);
 	writel(readl(base + NvRegPowerState) | NVREG_POWERSTATE_VALID, base + NvRegPowerState);
 
-	writel(0, base + NvRegIrqMask);
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	writel(NVREG_MIISTAT_MASK2, base + NvRegMIIStatus);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
-			np->msi_flags |= NV_MSI_X_ENABLED;
-			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) {
-				/* Request irq for rx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for tx handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-				/* Request irq for link and timer handling */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to their respective vector */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL);
-				set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
-			} else {
-				/* Request irq for all interrupts */
-				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-					pci_disable_msix(np->pci_dev);
-					np->msi_flags &= ~NV_MSI_X_ENABLED;
-					goto out_drain;
-				}
-
-				/* map interrupts to vector 0 */
-				writel(0, base + NvRegMSIXMap0);
-				writel(0, base + NvRegMSIXMap1);
-			}
-		}
-	}
-	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
-			np->msi_flags |= NV_MSI_ENABLED;
-			if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
-				pci_disable_msi(np->pci_dev);
-				np->msi_flags &= ~NV_MSI_ENABLED;
-				goto out_drain;
-			}
-
-			/* map interrupts to vector 0 */
-			writel(0, base + NvRegMSIMap0);
-			writel(0, base + NvRegMSIMap1);
-			/* enable msi vector 0 */
-			writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
-		}
-	}
-	if (ret != 0) {
-		if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0)
-			goto out_drain;
+	if (nv_request_irq(dev, 0)) {
+		goto out_drain;
 	}
 
 	/* ask for interrupts */
-	writel(np->irqmask, base + NvRegIrqMask);
+	nv_enable_hw_interrupts(dev, np->irqmask);
 
 	spin_lock_irq(&np->lock);
 	writel(NVREG_MCASTADDRA_FORCE, base + NvRegMulticastAddrA);
@@ -2855,6 +4100,10 @@ static int nv_open(struct net_device *de
 	}
 	if (oom)
 		mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
+
+	/* start statistics timer */
+	mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
+
 	spin_unlock_irq(&np->lock);
 
 	return 0;
@@ -2867,7 +4116,6 @@ static int nv_close(struct net_device *d
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base;
-	int i;
 
 	spin_lock_irq(&np->lock);
 	np->in_shutdown = 1;
@@ -2876,6 +4124,7 @@ static int nv_close(struct net_device *d
 
 	del_timer_sync(&np->oom_kick);
 	del_timer_sync(&np->nic_poll);
+	del_timer_sync(&np->stats_poll);
 
 	netif_stop_queue(dev);
 	spin_lock_irq(&np->lock);
@@ -2885,31 +4134,13 @@ static int nv_close(struct net_device *d
 
 	/* disable interrupts on the nic or we will lock up */
 	base = get_hwbase(dev);
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		writel(np->irqmask, base + NvRegIrqMask);
-	} else {
-		if (np->msi_flags & NV_MSI_ENABLED)
-			writel(0, base + NvRegMSIIrqMask);
-		writel(0, base + NvRegIrqMask);
-	}
+	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
 	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
-	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
-			free_irq(np->msi_x_entry[i].vector, dev);
-		}
-		pci_disable_msix(np->pci_dev);
-		np->msi_flags &= ~NV_MSI_X_ENABLED;
-	} else {
-		free_irq(np->pci_dev->irq, dev);
-		if (np->msi_flags & NV_MSI_ENABLED) {
-			pci_disable_msi(np->pci_dev);
-			np->msi_flags &= ~NV_MSI_ENABLED;
-		}
-	}
+	nv_free_irq(dev);
 
 	drain_ring(dev);
 
@@ -2952,6 +4183,9 @@ static int __devinit nv_probe(struct pci
 	init_timer(&np->nic_poll);
 	np->nic_poll.data = (unsigned long) dev;
 	np->nic_poll.function = &nv_do_nic_poll;	/* timer handler */
+	init_timer(&np->stats_poll);
+	np->stats_poll.data = (unsigned long) dev;
+	np->stats_poll.function = &nv_do_stats_poll;	/* timer handler */
 
 	err = pci_enable_device(pci_dev);
 	if (err) {
@@ -2985,18 +4219,17 @@ static int __devinit nv_probe(struct pci
 		goto out_relreg;
 	}
 
+	/* copy of driver data for ethtool */
+	np->driver_data = id->driver_data;
+
 	/* handle different descriptor versions */
 	if (id->driver_data & DEV_HAS_HIGH_DMA) {
 		/* packet format 3: supports 40-bit addressing */
 		np->desc_ver = DESC_VER_3;
-		if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
-			printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
-					pci_name(pci_dev));
-		} else {
-			if (pci_set_consistent_dma_mask(pci_dev, 0x0000007fffffffffULL)) {
-				printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
-					pci_name(pci_dev));
-				goto out_relreg;
+		if (dma_64bit) {
+			if (pci_set_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+				printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n",
+						pci_name(pci_dev));
 			} else {
 				dev->features |= NETIF_F_HIGHDMA;
 				printk(KERN_INFO "forcedeth: using HIGHDMA\n");
@@ -3013,39 +4246,68 @@ static int __devinit nv_probe(struct pci
 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_1;
 	}
 
+	if (dma_64bit && (id->driver_data & DEV_HAS_HIGH_DMA)) {
+		if (pci_set_consistent_dma_mask(pci_dev, DMA_39BIT_MASK)) {
+			printk(KERN_INFO "forcedeth: 64-bit DMA (consistent) failed for device %s.\n",
+				pci_name(pci_dev));
+		}
+	}
+
 	np->pkt_limit = NV_PKTLIMIT_1;
 	if (id->driver_data & DEV_HAS_LARGEDESC)
 		np->pkt_limit = NV_PKTLIMIT_2;
+	if (mtu > np->pkt_limit) {
+		printk(KERN_INFO "forcedeth: MTU value of %d is too large. Setting to maximum value of %d\n",
+		       mtu, np->pkt_limit);
+		dev->mtu = np->pkt_limit;
+	} else {
+		dev->mtu = mtu;
+	}
 
 	if (id->driver_data & DEV_HAS_CHECKSUM) {
-		np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
-		dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
+		if (rx_checksum_offload) {
+			np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK;
+		}
+		if (tx_checksum_offload)
+			dev->features |= NETIF_F_HW_CSUM;
+		if (scatter_gather)
+			dev->features |= NETIF_F_SG;
 #ifdef NETIF_F_TSO
-		dev->features |= NETIF_F_TSO;
+		if (tso_offload)
+			dev->features |= NETIF_F_TSO;
 #endif
  	}
 
 	np->vlanctl_bits = 0;
-	if (id->driver_data & DEV_HAS_VLAN) {
-		np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
-		dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
-		dev->vlan_rx_register = nv_vlan_rx_register;
-		dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
+	if ((id->driver_data & DEV_HAS_VLAN) && tagging_8021pq) {
+		if (rx_checksum_offload) {
+			np->vlanctl_bits = NVREG_VLANCONTROL_ENABLE;
+			dev->features |= NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX;
+			dev->vlan_rx_register = nv_vlan_rx_register;
+			dev->vlan_rx_kill_vid = nv_vlan_rx_kill_vid;
+		} else {
+			printk(KERN_INFO "forcedeth: tagging_8021pq cannot be enabled if rx_checksum_offload is disabled\n");
+		}
 	}
 
 	np->msi_flags = 0;
-	if ((id->driver_data & DEV_HAS_MSI) && !disable_msi) {
+	if ((id->driver_data & DEV_HAS_MSI) && msi) {
 		np->msi_flags |= NV_MSI_CAPABLE;
 	}
-	if ((id->driver_data & DEV_HAS_MSI_X) && !disable_msix) {
+	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
 		np->msi_flags |= NV_MSI_X_CAPABLE;
 	}
 
 	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
+	if (rx_flow_control == NV_RX_FLOW_CONTROL_ENABLED)
+		np->pause_flags |= NV_PAUSEFRAME_RX_REQ;
 	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
 		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
+		if (tx_flow_control == NV_TX_FLOW_CONTROL_ENABLED)
+			np->pause_flags |= NV_PAUSEFRAME_TX_REQ;
 	}
-
+	if (autoneg == AUTONEG_ENABLE)
+		np->pause_flags |= NV_PAUSEFRAME_AUTONEG;
 
 	err = -ENOMEM;
 	np->base = ioremap(addr, NV_PCI_REGSZ);
@@ -3055,21 +4317,61 @@ static int __devinit nv_probe(struct pci
 
 	dev->irq = pci_dev->irq;
 
+	if (np->desc_ver == DESC_VER_1) {
+		if (rx_ring_size > RING_MAX_DESC_VER_1) {
+			printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
+			       rx_ring_size, RING_MAX_DESC_VER_1);
+			rx_ring_size = RING_MAX_DESC_VER_1;
+		}
+		if (tx_ring_size > RING_MAX_DESC_VER_1) {
+			printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
+			       tx_ring_size, RING_MAX_DESC_VER_1);
+			tx_ring_size = RING_MAX_DESC_VER_1;
+		}
+	} else {
+		if (rx_ring_size > RING_MAX_DESC_VER_2_3) {
+			printk(KERN_INFO "forcedeth: rx_ring_size of %d is too large. Setting to maximum of %d\n",
+			       rx_ring_size, RING_MAX_DESC_VER_2_3);
+			rx_ring_size = RING_MAX_DESC_VER_2_3;
+		}
+		if (tx_ring_size > RING_MAX_DESC_VER_2_3) {
+			printk(KERN_INFO "forcedeth: tx_ring_size of %d is too large. Setting to maximum of %d\n",
+			       tx_ring_size, RING_MAX_DESC_VER_2_3);
+			tx_ring_size = RING_MAX_DESC_VER_2_3;
+		}
+	}
+	np->rx_ring_size = rx_ring_size;
+	np->tx_ring_size = tx_ring_size;
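+	/* queue stop/wake thresholds on the number of outstanding
+	 * tx descriptors */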
+	np->tx_limit_stop = tx_ring_size - TX_LIMIT_DIFFERENCE;
+	np->tx_limit_start = tx_ring_size - TX_LIMIT_DIFFERENCE - 1;
+
 	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 		np->rx_ring.orig = pci_alloc_consistent(pci_dev,
-					sizeof(struct ring_desc) * (RX_RING + TX_RING),
+					sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
 					&np->ring_addr);
 		if (!np->rx_ring.orig)
 			goto out_unmap;
-		np->tx_ring.orig = &np->rx_ring.orig[RX_RING];
+		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
 	} else {
 		np->rx_ring.ex = pci_alloc_consistent(pci_dev,
-					sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
+					sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
 					&np->ring_addr);
 		if (!np->rx_ring.ex)
 			goto out_unmap;
-		np->tx_ring.ex = &np->rx_ring.ex[RX_RING];
+		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
 	}
+	np->rx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->rx_ring_size, GFP_KERNEL);
+	np->rx_dma = kmalloc(sizeof(dma_addr_t) * np->rx_ring_size, GFP_KERNEL);
+	np->tx_skbuff = kmalloc(sizeof(struct sk_buff*) * np->tx_ring_size, GFP_KERNEL);
+	np->tx_dma = kmalloc(sizeof(dma_addr_t) * np->tx_ring_size, GFP_KERNEL);
+	np->tx_dma_len = kmalloc(sizeof(unsigned int) * np->tx_ring_size, GFP_KERNEL);
+	if (!np->rx_skbuff || !np->rx_dma || !np->tx_skbuff || !np->tx_dma || !np->tx_dma_len)
+		goto out_freering;
+	memset(np->rx_skbuff, 0, sizeof(struct sk_buff*) * np->rx_ring_size);
+	memset(np->rx_dma, 0, sizeof(dma_addr_t) * np->rx_ring_size);
+	memset(np->tx_skbuff, 0, sizeof(struct sk_buff*) * np->tx_ring_size);
+	memset(np->tx_dma, 0, sizeof(dma_addr_t) * np->tx_ring_size);
+	memset(np->tx_dma_len, 0, sizeof(unsigned int) * np->tx_ring_size);
 
 	dev->open = nv_open;
 	dev->stop = nv_close;
@@ -3122,7 +4424,7 @@ static int __devinit nv_probe(struct pci
 
 	/* disable WOL */
 	writel(0, base + NvRegWakeUpFlags);
-	np->wolenabled = 0;
+	np->wolenabled = wol;
 
 	if (np->desc_ver == DESC_VER_1) {
 		np->tx_flags = NV_TX_VALID;
@@ -3177,7 +4479,7 @@ static int __devinit nv_probe(struct pci
 	if (i == 33) {
 		printk(KERN_INFO "%s: open: Could not find a valid PHY.\n",
 		       pci_name(pci_dev));
-		goto out_freering;
+		goto out_error;
 	}
 	
 	/* reset it */
@@ -3186,12 +4488,12 @@ static int __devinit nv_probe(struct pci
 	/* set default link speed settings */
 	np->linkspeed = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 	np->duplex = 0;
-	np->autoneg = 1;
+	np->autoneg = autoneg;
 
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_INFO "forcedeth: unable to register netdev: %d\n", err);
-		goto out_freering;
+		goto out_error;
 	}
 	printk(KERN_INFO "%s: forcedeth.c: subsystem: %05x:%04x bound to %s\n",
 			dev->name, pci_dev->subsystem_vendor, pci_dev->subsystem_device,
@@ -3199,14 +4501,10 @@ static int __devinit nv_probe(struct pci
 
 	return 0;
 
-out_freering:
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING),
-				    np->rx_ring.orig, np->ring_addr);
-	else
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING),
-				    np->rx_ring.ex, np->ring_addr);
+out_error:
 	pci_set_drvdata(pci_dev, NULL);
+out_freering:
+	free_rings(dev);
 out_unmap:
 	iounmap(get_hwbase(dev));
 out_relreg:
@@ -3222,15 +4520,11 @@ out:
 static void __devexit nv_remove(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
-	struct fe_priv *np = netdev_priv(dev);
 
 	unregister_netdev(dev);
 
 	/* free all structures */
-	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (RX_RING + TX_RING), np->rx_ring.orig, np->ring_addr);
-	else
-		pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (RX_RING + TX_RING), np->rx_ring.ex, np->ring_addr);
+	free_rings(dev);
 	iounmap(get_hwbase(dev));
 	pci_release_regions(pci_dev);
 	pci_disable_device(pci_dev);
@@ -3293,11 +4587,11 @@ static struct pci_device_id pci_tbl[] = 
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_PAUSEFRAME_TX,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_PAUSEFRAME_TX,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_PAUSEFRAME_TX|DEV_HAS_STATISTICS|DEV_HAS_TEST_EXTENDED,
 	},
 	{0,},
 };
@@ -3327,10 +4621,39 @@ module_param(optimization_mode, int, 0);
 MODULE_PARM_DESC(optimization_mode, "In throughput mode (0), every tx & rx packet will generate an interrupt. In CPU mode (1), interrupts are controlled by a timer.");
 module_param(poll_interval, int, 0);
 MODULE_PARM_DESC(poll_interval, "Interval determines how frequent timer interrupt is generated by [(time_in_micro_secs * 100) / (2^10)]. Min is 0 and Max is 65535.");
-module_param(disable_msi, int, 0);
-MODULE_PARM_DESC(disable_msi, "Disable MSI interrupts by setting to 1.");
-module_param(disable_msix, int, 0);
-MODULE_PARM_DESC(disable_msix, "Disable MSIX interrupts by setting to 1.");
+module_param(msi, int, 0);
+MODULE_PARM_DESC(msi, "MSI interrupts are enabled by setting to 1 and disabled by setting to 0.");
+module_param(msix, int, 0);
+MODULE_PARM_DESC(msix, "MSIX interrupts are enabled by setting to 1 and disabled by setting to 0.");
+
+module_param(speed_duplex, int, 0);
+MODULE_PARM_DESC(speed_duplex, "PHY speed and duplex settings. Auto = 0, 10mbps half = 1, 10mbps full = 2, 100mbps half = 3, 100mbps full = 4, 1000mbps full = 5.");
+module_param(autoneg, int, 0);
+MODULE_PARM_DESC(autoneg, "PHY autonegotiation is enabled by setting to 1 and disabled by setting to 0.");
+module_param(scatter_gather, int, 0);
+MODULE_PARM_DESC(scatter_gather, "Scatter gather is enabled by setting to 1 and disabled by setting to 0.");
+module_param(tso_offload, int, 0);
+MODULE_PARM_DESC(tso_offload, "TCP Segmentation offload is enabled by setting to 1 and disabled by setting to 0.");
+module_param(mtu, int, 0);
+MODULE_PARM_DESC(mtu, "MTU value. Maximum value of 1500 or 9100 depending on hardware.");
+module_param(tx_checksum_offload, int, 0);
+MODULE_PARM_DESC(tx_checksum_offload, "Tx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
+module_param(rx_checksum_offload, int, 0);
+MODULE_PARM_DESC(rx_checksum_offload, "Rx checksum offload is enabled by setting to 1 and disabled by setting to 0.");
+module_param(tx_ring_size, int, 0);
+MODULE_PARM_DESC(tx_ring_size, "Tx ring size. Maximum value of 1024 or 16384 depending on hardware.");
+module_param(rx_ring_size, int, 0);
+MODULE_PARM_DESC(rx_ring_size, "Rx ring size. Maximum value of 1024 or 16384 depending on hardware.");
+module_param(tx_flow_control, int, 0);
+MODULE_PARM_DESC(tx_flow_control, "Tx flow control is enabled by setting to 1 and disabled by setting to 0.");
+module_param(rx_flow_control, int, 0);
+MODULE_PARM_DESC(rx_flow_control, "Rx flow control is enabled by setting to 1 and disabled by setting to 0.");
+module_param(dma_64bit, int, 0);
+MODULE_PARM_DESC(dma_64bit, "High DMA is enabled by setting to 1 and disabled by setting to 0.");
+module_param(wol, int, 0);
+MODULE_PARM_DESC(wol, "Wake-On-Lan is enabled by setting to 1 and disabled by setting to 0.");
+module_param(tagging_8021pq, int, 0);
+MODULE_PARM_DESC(tagging_8021pq, "802.1pq tagging is enabled by setting to 1 and disabled by setting to 0.");
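+
+/* Example module load (hypothetical values, combining the new parameters):
+ *	modprobe forcedeth msi=1 autoneg=1 rx_ring_size=512 tx_ring_size=512
+ */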
 
 MODULE_AUTHOR("Manfred Spraul <manfred@xxxxxxxxxxxxxxxx>");
 MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
_

Patches currently in -mm which might be from manfred@xxxxxxxxxxxxxxxx are

forcedeth-suggested-cleanups.patch
forcedeth-add-support-for-flow-control.patch
forcedeth-add-support-for-configuration.patch
slab-leak-detector.patch
mm-debug-dump-pageframes-on-bad_page.patch
slab-leaks3-default-y.patch
