broadcom: Move the Broadcom drivers

Move the drivers for Broadcom devices into
drivers/net/ethernet/broadcom/ and make the necessary Kconfig and
Makefile changes.

CC: Eilon Greenstein <eilong@broadcom.com>
CC: Michael Chan <mchan@broadcom.com>
CC: Matt Carlson <mcarlson@broadcom.com>
CC: Gary Zambrano <zambrano@broadcom.com>
CC: "Maciej W. Rozycki" <macro@linux-mips.org>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 56ed5ec..772fb49 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -14,5 +14,6 @@
 source "drivers/net/ethernet/3com/Kconfig"
 source "drivers/net/ethernet/8390/Kconfig"
 source "drivers/net/ethernet/amd/Kconfig"
+source "drivers/net/ethernet/broadcom/Kconfig"
 
 endif # ETHERNET
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index fc82588..3ef49a2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -5,3 +5,4 @@
 obj-$(CONFIG_NET_VENDOR_3COM) += 3com/
 obj-$(CONFIG_NET_VENDOR_8390) += 8390/
 obj-$(CONFIG_NET_VENDOR_AMD) += amd/
+obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
new file mode 100644
index 0000000..8986e57
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -0,0 +1,120 @@
+#
+# Broadcom device configuration
+#
+
+config NET_VENDOR_BROADCOM
+	bool "Broadcom devices"
+	depends on (SSB_POSSIBLE && HAS_DMA) || PCI || BCM63XX || \
+		   SIBYTE_SB1xxx_SOC
+	---help---
+	  If you have a network (Ethernet) chipset belonging to this class,
+	  say Y.
+
+	  Note that the answer to this question does not directly affect
+	  the kernel: saying N will just cause the configurator to skip all
+	  the questions regarding Broadcom chipsets. If you say Y, you will be asked
+	  for your specific chipset/driver in the following questions.
+
+if NET_VENDOR_BROADCOM
+
+config B44
+	tristate "Broadcom 440x/47xx ethernet support"
+	depends on SSB_POSSIBLE && HAS_DMA
+	select SSB
+	select MII
+	---help---
+	  If you have a network (Ethernet) controller of this type, say Y
+	  or M and read the Ethernet-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called b44.
+
+# Auto-select SSB PCI-HOST support, if possible
+config B44_PCI_AUTOSELECT
+	bool
+	depends on B44 && SSB_PCIHOST_POSSIBLE
+	select SSB_PCIHOST
+	default y
+
+# Auto-select SSB PCICORE driver, if possible
+config B44_PCICORE_AUTOSELECT
+	bool
+	depends on B44 && SSB_DRIVER_PCICORE_POSSIBLE
+	select SSB_DRIVER_PCICORE
+	default y
+
+config B44_PCI
+	bool
+	depends on B44_PCI_AUTOSELECT && B44_PCICORE_AUTOSELECT
+	default y
+
+config BCM63XX_ENET
+	tristate "Broadcom 63xx internal mac support"
+	depends on BCM63XX
+	select MII
+	select PHYLIB
+	help
+	  This driver supports the ethernet MACs in the Broadcom 63xx
+	  MIPS chipset family (BCM63XX).
+
+config BNX2
+	tristate "Broadcom NetXtremeII support"
+	depends on PCI
+	select CRC32
+	select FW_LOADER
+	---help---
+	  This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called bnx2.  This is recommended.
+
+config CNIC
+	tristate "Broadcom CNIC support"
+	depends on PCI
+	select BNX2
+	select UIO
+	---help---
+	  This driver supports offload features of Broadcom NetXtremeII
+	  gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cnic.  This is recommended.
+
+config SB1250_MAC
+	tristate "SB1250 Gigabit Ethernet support"
+	depends on SIBYTE_SB1xxx_SOC
+	select PHYLIB
+	---help---
+	  This driver supports Gigabit Ethernet interfaces based on the
+	  Broadcom SiByte family of System-On-a-Chip parts.  They include
+	  the BCM1120, BCM1125, BCM1125H, BCM1250, BCM1255, BCM1280, BCM1455
+	  and BCM1480 chips.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sb1250-mac.
+
+config TIGON3
+	tristate "Broadcom Tigon3 support"
+	depends on PCI
+	select PHYLIB
+	---help---
+	  This driver supports Broadcom Tigon3 based gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called tg3.  This is recommended.
+
+config BNX2X
+	tristate "Broadcom NetXtremeII 10Gb support"
+	depends on PCI
+	select FW_LOADER
+	select ZLIB_INFLATE
+	select LIBCRC32C
+	select MDIO
+	---help---
+	  This driver supports Broadcom NetXtremeII 10 gigabit Ethernet cards.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called bnx2x.  This is recommended.
+
+endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
new file mode 100644
index 0000000..b789605
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for the Broadcom network device drivers.
+#
+
+obj-$(CONFIG_B44) += b44.o
+obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
+obj-$(CONFIG_BNX2X) += bnx2x/
+obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
+obj-$(CONFIG_TIGON3) += tg3.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
new file mode 100644
index 0000000..41ea84e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -0,0 +1,2411 @@
+/* b44.c: Broadcom 44xx/47xx Fast Ethernet device driver.
+ *
+ * Copyright (C) 2002 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2004 Pekka Pietikainen (pp@ee.oulu.fi)
+ * Copyright (C) 2004 Florian Schirmer (jolt@tuxbox.org)
+ * Copyright (C) 2006 Felix Fietkau (nbd@openwrt.org)
+ * Copyright (C) 2006 Broadcom Corporation.
+ * Copyright (C) 2007 Michael Buesch <m@bues.ch>
+ *
+ * Distribute under GPL.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/ssb/ssb.h>
+#include <linux/slab.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+
+#include "b44.h"
+
+#define DRV_MODULE_NAME		"b44"
+#define DRV_MODULE_VERSION	"2.0"
+#define DRV_DESCRIPTION		"Broadcom 44xx/47xx 10/100 PCI ethernet driver"
+
+#define B44_DEF_MSG_ENABLE	  \
+	(NETIF_MSG_DRV		| \
+	 NETIF_MSG_PROBE	| \
+	 NETIF_MSG_LINK		| \
+	 NETIF_MSG_TIMER	| \
+	 NETIF_MSG_IFDOWN	| \
+	 NETIF_MSG_IFUP		| \
+	 NETIF_MSG_RX_ERR	| \
+	 NETIF_MSG_TX_ERR)
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+#define B44_TX_TIMEOUT			(5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define B44_MIN_MTU			60
+#define B44_MAX_MTU			1500
+
+#define B44_RX_RING_SIZE		512
+#define B44_DEF_RX_RING_PENDING		200
+#define B44_RX_RING_BYTES	(sizeof(struct dma_desc) * \
+				 B44_RX_RING_SIZE)
+#define B44_TX_RING_SIZE		512
+#define B44_DEF_TX_RING_PENDING		(B44_TX_RING_SIZE - 1)
+#define B44_TX_RING_BYTES	(sizeof(struct dma_desc) * \
+				 B44_TX_RING_SIZE)
+
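+/*
+ * The TX ring is a power-of-two circular buffer, so NEXT_TX() wraps
+ * with a simple mask.  TX_BUFFS_AVAIL() copes with either ordering of
+ * the producer and consumer indices; e.g. with tx_pending = 511,
+ * tx_cons = 10 and tx_prod = 500 there are 10 + 511 - 500 = 21 free
+ * descriptors.
+ */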
+#define TX_RING_GAP(BP)	\
+	(B44_TX_RING_SIZE - (BP)->tx_pending)
+#define TX_BUFFS_AVAIL(BP)						\
+	(((BP)->tx_cons <= (BP)->tx_prod) ?				\
+	  (BP)->tx_cons + (BP)->tx_pending - (BP)->tx_prod :		\
+	  (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
+#define NEXT_TX(N)		(((N) + 1) & (B44_TX_RING_SIZE - 1))
+
+#define RX_PKT_OFFSET		(RX_HEADER_LEN + 2)
+#define RX_PKT_BUF_SZ		(1536 + RX_PKT_OFFSET)
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define B44_TX_WAKEUP_THRESH		(B44_TX_RING_SIZE / 4)
+
+/* b44 internal pattern match filter info */
+#define B44_PATTERN_BASE	0x400
+#define B44_PATTERN_SIZE	0x80
+#define B44_PMASK_BASE		0x600
+#define B44_PMASK_SIZE		0x10
+#define B44_MAX_PATTERNS	16
+#define B44_ETHIPV6UDP_HLEN	62
+#define B44_ETHIPV4UDP_HLEN	42
+
+MODULE_AUTHOR("Felix Fietkau, Florian Schirmer, Pekka Pietikainen, David S. Miller");
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static int b44_debug = -1;	/* -1 == use B44_DEF_MSG_ENABLE as value */
+module_param(b44_debug, int, 0);
+MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
+
+
+#ifdef CONFIG_B44_PCI
+static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
+	{ 0 } /* terminate list with empty entry */
+};
+MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
+
+static struct pci_driver b44_pci_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= b44_pci_tbl,
+};
+#endif /* CONFIG_B44_PCI */
+
+static const struct ssb_device_id b44_ssb_tbl[] = {
+	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV),
+	SSB_DEVTABLE_END
+};
+MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl);
+
+static void b44_halt(struct b44 *);
+static void b44_init_rings(struct b44 *);
+
+#define B44_FULL_RESET		1
+#define B44_FULL_RESET_SKIP_PHY	2
+#define B44_PARTIAL_RESET	3
+#define B44_CHIP_RESET_FULL	4
+#define B44_CHIP_RESET_PARTIAL	5
+
+static void b44_init_hw(struct b44 *, int);
+
+static int dma_desc_sync_size;
+static int instance;
+
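+/*
+ * b44.h declares each MIB statistics register once in the
+ * B44_STAT_REG_DECLARE X-macro list; redefining _B44() to stringize
+ * its argument expands that list into the ethtool string table below.
+ */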
+static const char b44_gstrings[][ETH_GSTRING_LEN] = {
+#define _B44(x...)	# x,
+B44_STAT_REG_DECLARE
+#undef _B44
+};
+
+static inline void b44_sync_dma_desc_for_device(struct ssb_device *sdev,
+						dma_addr_t dma_base,
+						unsigned long offset,
+						enum dma_data_direction dir)
+{
+	dma_sync_single_for_device(sdev->dma_dev, dma_base + offset,
+				   dma_desc_sync_size, dir);
+}
+
+static inline void b44_sync_dma_desc_for_cpu(struct ssb_device *sdev,
+					     dma_addr_t dma_base,
+					     unsigned long offset,
+					     enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(sdev->dma_dev, dma_base + offset,
+				dma_desc_sync_size, dir);
+}
+
+static inline unsigned long br32(const struct b44 *bp, unsigned long reg)
+{
+	return ssb_read32(bp->sdev, reg);
+}
+
+static inline void bw32(const struct b44 *bp,
+			unsigned long reg, unsigned long val)
+{
+	ssb_write32(bp->sdev, reg, val);
+}
+
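+/*
+ * Poll @reg until @bit is set (or cleared, when @clear is nonzero),
+ * spinning 10us per iteration; @timeout is thus in ~10us units.
+ */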
+static int b44_wait_bit(struct b44 *bp, unsigned long reg,
+			u32 bit, unsigned long timeout, const int clear)
+{
+	unsigned long i;
+
+	for (i = 0; i < timeout; i++) {
+		u32 val = br32(bp, reg);
+
+		if (clear && !(val & bit))
+			break;
+		if (!clear && (val & bit))
+			break;
+		udelay(10);
+	}
+	if (i == timeout) {
+		if (net_ratelimit())
+			netdev_err(bp->dev, "BUG!  Timeout waiting for bit %08x of register %lx to %s\n",
+				   bit, reg, clear ? "clear" : "set");
+
+		return -ENODEV;
+	}
+	return 0;
+}
+
+static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index)
+{
+	u32 val;
+
+	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_READ |
+			    (index << CAM_CTRL_INDEX_SHIFT)));
+
+	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
+
+	val = br32(bp, B44_CAM_DATA_LO);
+
+	data[2] = (val >> 24) & 0xFF;
+	data[3] = (val >> 16) & 0xFF;
+	data[4] = (val >> 8) & 0xFF;
+	data[5] = (val >> 0) & 0xFF;
+
+	val = br32(bp, B44_CAM_DATA_HI);
+
+	data[0] = (val >> 8) & 0xFF;
+	data[1] = (val >> 0) & 0xFF;
+}
+
+static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+{
+	u32 val;
+
+	val  = ((u32) data[2]) << 24;
+	val |= ((u32) data[3]) << 16;
+	val |= ((u32) data[4]) <<  8;
+	val |= ((u32) data[5]) <<  0;
+	bw32(bp, B44_CAM_DATA_LO, val);
+	val = (CAM_DATA_HI_VALID |
+	       (((u32) data[0]) << 8) |
+	       (((u32) data[1]) << 0));
+	bw32(bp, B44_CAM_DATA_HI, val);
+	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
+			    (index << CAM_CTRL_INDEX_SHIFT)));
+	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);
+}
+
+static inline void __b44_disable_ints(struct b44 *bp)
+{
+	bw32(bp, B44_IMASK, 0);
+}
+
+static void b44_disable_ints(struct b44 *bp)
+{
+	__b44_disable_ints(bp);
+
+	/* Flush posted writes. */
+	br32(bp, B44_IMASK);
+}
+
+static void b44_enable_ints(struct b44 *bp)
+{
+	bw32(bp, B44_IMASK, bp->imask);
+}
+
+static int __b44_readphy(struct b44 *bp, int phy_addr, int reg, u32 *val)
+{
+	int err;
+
+	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
+			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
+			     (reg << MDIO_DATA_RA_SHIFT) |
+			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
+	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;
+
+	return err;
+}
+
+static int __b44_writephy(struct b44 *bp, int phy_addr, int reg, u32 val)
+{
+	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
+	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
+			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
+			     (phy_addr << MDIO_DATA_PMD_SHIFT) |
+			     (reg << MDIO_DATA_RA_SHIFT) |
+			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
+			     (val & MDIO_DATA_DATA)));
+	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
+}
+
+static inline int b44_readphy(struct b44 *bp, int reg, u32 *val)
+{
+	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+		return 0;
+
+	return __b44_readphy(bp, bp->phy_addr, reg, val);
+}
+
+static inline int b44_writephy(struct b44 *bp, int reg, u32 val)
+{
+	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+		return 0;
+
+	return __b44_writephy(bp, bp->phy_addr, reg, val);
+}
+
+/* miilib interface */
+static int b44_mii_read(struct net_device *dev, int phy_id, int location)
+{
+	u32 val;
+	struct b44 *bp = netdev_priv(dev);
+	int rc = __b44_readphy(bp, phy_id, location, &val);
+	if (rc)
+		return 0xffffffff;
+	return val;
+}
+
+static void b44_mii_write(struct net_device *dev, int phy_id, int location,
+			 int val)
+{
+	struct b44 *bp = netdev_priv(dev);
+	__b44_writephy(bp, phy_id, location, val);
+}
+
+static int b44_phy_reset(struct b44 *bp)
+{
+	u32 val;
+	int err;
+
+	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+		return 0;
+	err = b44_writephy(bp, MII_BMCR, BMCR_RESET);
+	if (err)
+		return err;
+	udelay(100);
+	err = b44_readphy(bp, MII_BMCR, &val);
+	if (!err) {
+		if (val & BMCR_RESET) {
+			netdev_err(bp->dev, "PHY Reset would not complete\n");
+			err = -ENODEV;
+		}
+	}
+
+	return err;
+}
+
+static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
+{
+	u32 val;
+
+	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
+	bp->flags |= pause_flags;
+
+	val = br32(bp, B44_RXCONFIG);
+	if (pause_flags & B44_FLAG_RX_PAUSE)
+		val |= RXCONFIG_FLOW;
+	else
+		val &= ~RXCONFIG_FLOW;
+	bw32(bp, B44_RXCONFIG, val);
+
+	val = br32(bp, B44_MAC_FLOW);
+	if (pause_flags & B44_FLAG_TX_PAUSE)
+		val |= (MAC_FLOW_PAUSE_ENAB |
+			(0xc0 & MAC_FLOW_RX_HI_WATER));
+	else
+		val &= ~MAC_FLOW_PAUSE_ENAB;
+	bw32(bp, B44_MAC_FLOW, val);
+}
+
+static void b44_set_flow_ctrl(struct b44 *bp, u32 local, u32 remote)
+{
+	u32 pause_enab = 0;
+
+	/* The driver supports only rx pause by default because
+	   the b44 mac tx pause mechanism generates excessive
+	   pause frames.
+	   Use ethtool to turn on b44 tx pause if necessary.
+	 */
+	if ((local & ADVERTISE_PAUSE_CAP) &&
+	    (local & ADVERTISE_PAUSE_ASYM)) {
+		if ((remote & LPA_PAUSE_ASYM) &&
+		    !(remote & LPA_PAUSE_CAP))
+			pause_enab |= B44_FLAG_RX_PAUSE;
+	}
+
+	__b44_set_flow_ctrl(bp, pause_enab);
+}
+
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
+static void b44_wap54g10_workaround(struct b44 *bp)
+{
+	char buf[20];
+	u32 val;
+	int err;
+
+	/*
+	 * workaround for bad hardware design in Linksys WAP54G v1.0
+	 * see https://dev.openwrt.org/ticket/146
+	 * check and reset bit "isolate"
+	 */
+	if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
+		return;
+	if (simple_strtoul(buf, NULL, 0) == 2) {
+		err = __b44_readphy(bp, 0, MII_BMCR, &val);
+		if (err)
+			goto error;
+		if (!(val & BMCR_ISOLATE))
+			return;
+		val &= ~BMCR_ISOLATE;
+		err = __b44_writephy(bp, 0, MII_BMCR, val);
+		if (err)
+			goto error;
+	}
+	return;
+error:
+	pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
+}
+#else
+static inline void b44_wap54g10_workaround(struct b44 *bp)
+{
+}
+#endif
+
+static int b44_setup_phy(struct b44 *bp)
+{
+	u32 val;
+	int err;
+
+	b44_wap54g10_workaround(bp);
+
+	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY)
+		return 0;
+	if ((err = b44_readphy(bp, B44_MII_ALEDCTRL, &val)) != 0)
+		goto out;
+	if ((err = b44_writephy(bp, B44_MII_ALEDCTRL,
+				val & MII_ALEDCTRL_ALLMSK)) != 0)
+		goto out;
+	if ((err = b44_readphy(bp, B44_MII_TLEDCTRL, &val)) != 0)
+		goto out;
+	if ((err = b44_writephy(bp, B44_MII_TLEDCTRL,
+				val | MII_TLEDCTRL_ENABLE)) != 0)
+		goto out;
+
+	if (!(bp->flags & B44_FLAG_FORCE_LINK)) {
+		u32 adv = ADVERTISE_CSMA;
+
+		if (bp->flags & B44_FLAG_ADV_10HALF)
+			adv |= ADVERTISE_10HALF;
+		if (bp->flags & B44_FLAG_ADV_10FULL)
+			adv |= ADVERTISE_10FULL;
+		if (bp->flags & B44_FLAG_ADV_100HALF)
+			adv |= ADVERTISE_100HALF;
+		if (bp->flags & B44_FLAG_ADV_100FULL)
+			adv |= ADVERTISE_100FULL;
+
+		if (bp->flags & B44_FLAG_PAUSE_AUTO)
+			adv |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+		if ((err = b44_writephy(bp, MII_ADVERTISE, adv)) != 0)
+			goto out;
+		if ((err = b44_writephy(bp, MII_BMCR, (BMCR_ANENABLE |
+						       BMCR_ANRESTART))) != 0)
+			goto out;
+	} else {
+		u32 bmcr;
+
+		if ((err = b44_readphy(bp, MII_BMCR, &bmcr)) != 0)
+			goto out;
+		bmcr &= ~(BMCR_FULLDPLX | BMCR_ANENABLE | BMCR_SPEED100);
+		if (bp->flags & B44_FLAG_100_BASE_T)
+			bmcr |= BMCR_SPEED100;
+		if (bp->flags & B44_FLAG_FULL_DUPLEX)
+			bmcr |= BMCR_FULLDPLX;
+		if ((err = b44_writephy(bp, MII_BMCR, bmcr)) != 0)
+			goto out;
+
+		/* Since we will not be negotiating there is no safe way
+		 * to determine if the link partner supports flow control
+		 * or not.  So just disable it completely in this case.
+		 */
+		b44_set_flow_ctrl(bp, 0, 0);
+	}
+
+out:
+	return err;
+}
+
+static void b44_stats_update(struct b44 *bp)
+{
+	unsigned long reg;
+	u32 *val;
+
+	val = &bp->hw_stats.tx_good_octets;
+	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
+		*val++ += br32(bp, reg);
+	}
+
+	/* Pad */
+	reg += 8*4UL;
+
+	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
+		*val++ += br32(bp, reg);
+	}
+}
+
+static void b44_link_report(struct b44 *bp)
+{
+	if (!netif_carrier_ok(bp->dev)) {
+		netdev_info(bp->dev, "Link is down\n");
+	} else {
+		netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
+			    (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
+			    (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
+
+		netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
+			    (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
+			    (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
+	}
+}
+
+static void b44_check_phy(struct b44 *bp)
+{
+	u32 bmsr, aux;
+
+	if (bp->phy_addr == B44_PHY_ADDR_NO_PHY) {
+		bp->flags |= B44_FLAG_100_BASE_T;
+		bp->flags |= B44_FLAG_FULL_DUPLEX;
+		if (!netif_carrier_ok(bp->dev)) {
+			u32 val = br32(bp, B44_TX_CTRL);
+			val |= TX_CTRL_DUPLEX;
+			bw32(bp, B44_TX_CTRL, val);
+			netif_carrier_on(bp->dev);
+			b44_link_report(bp);
+		}
+		return;
+	}
+
+	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
+	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
+	    (bmsr != 0xffff)) {
+		if (aux & MII_AUXCTRL_SPEED)
+			bp->flags |= B44_FLAG_100_BASE_T;
+		else
+			bp->flags &= ~B44_FLAG_100_BASE_T;
+		if (aux & MII_AUXCTRL_DUPLEX)
+			bp->flags |= B44_FLAG_FULL_DUPLEX;
+		else
+			bp->flags &= ~B44_FLAG_FULL_DUPLEX;
+
+		if (!netif_carrier_ok(bp->dev) &&
+		    (bmsr & BMSR_LSTATUS)) {
+			u32 val = br32(bp, B44_TX_CTRL);
+			u32 local_adv, remote_adv;
+
+			if (bp->flags & B44_FLAG_FULL_DUPLEX)
+				val |= TX_CTRL_DUPLEX;
+			else
+				val &= ~TX_CTRL_DUPLEX;
+			bw32(bp, B44_TX_CTRL, val);
+
+			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
+			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
+			    !b44_readphy(bp, MII_LPA, &remote_adv))
+				b44_set_flow_ctrl(bp, local_adv, remote_adv);
+
+			/* Link now up */
+			netif_carrier_on(bp->dev);
+			b44_link_report(bp);
+		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
+			/* Link now down */
+			netif_carrier_off(bp->dev);
+			b44_link_report(bp);
+		}
+
+		if (bmsr & BMSR_RFAULT)
+			netdev_warn(bp->dev, "Remote fault detected in PHY\n");
+		if (bmsr & BMSR_JCD)
+			netdev_warn(bp->dev, "Jabber detected in PHY\n");
+	}
+}
+
+static void b44_timer(unsigned long __opaque)
+{
+	struct b44 *bp = (struct b44 *) __opaque;
+
+	spin_lock_irq(&bp->lock);
+
+	b44_check_phy(bp);
+
+	b44_stats_update(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
+}
+
+static void b44_tx(struct b44 *bp)
+{
+	u32 cur, cons;
+
+	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
+	cur /= sizeof(struct dma_desc);
+
+	/* XXX needs updating when NETIF_F_SG is supported */
+	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
+		struct ring_info *rp = &bp->tx_buffers[cons];
+		struct sk_buff *skb = rp->skb;
+
+		BUG_ON(skb == NULL);
+
+		dma_unmap_single(bp->sdev->dma_dev,
+				 rp->mapping,
+				 skb->len,
+				 DMA_TO_DEVICE);
+		rp->skb = NULL;
+		dev_kfree_skb(skb);
+	}
+
+	bp->tx_cons = cons;
+	if (netif_queue_stopped(bp->dev) &&
+	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
+		netif_wake_queue(bp->dev);
+
+	bw32(bp, B44_GPTIMER, 0);
+}
+
+/* Works like this.  This chip writes a 'struct rx_header' 30 bytes
+ * before the DMA address you give it.  So we allocate 30 more bytes
+ * for the RX buffer, DMA map all of it, skb_reserve the 30 bytes, then
+ * point the chip at 30 bytes past where the rx_header will go.
+ */
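+/*
+ * Resulting buffer layout (RX_PKT_OFFSET == RX_HEADER_LEN + 2):
+ *
+ *   skb->data -> [ struct rx_header | 2 byte pad | received frame ... ]
+ *
+ * b44_rx() strips the header with skb_pull(skb, RX_PKT_OFFSET) before
+ * handing the frame to the stack.
+ */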
+static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+	struct dma_desc *dp;
+	struct ring_info *src_map, *map;
+	struct rx_header *rh;
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int dest_idx;
+	u32 ctrl;
+
+	src_map = NULL;
+	if (src_idx >= 0)
+		src_map = &bp->rx_buffers[src_idx];
+	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+	map = &bp->rx_buffers[dest_idx];
+	skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+				 RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
+
+	/* Hardware bug work-around, the chip is unable to do PCI DMA
+	   to/from anything above 1GB :-( */
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+		/* Sigh... */
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping,
+					     RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(skb);
+		skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+		if (skb == NULL)
+			return -ENOMEM;
+		mapping = dma_map_single(bp->sdev->dma_dev, skb->data,
+					 RX_PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) ||
+		    mapping + RX_PKT_BUF_SZ > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping, RX_PKT_BUF_SZ, DMA_FROM_DEVICE);
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+		bp->force_copybreak = 1;
+	}
+
+	rh = (struct rx_header *) skb->data;
+
+	rh->len = 0;
+	rh->flags = 0;
+
+	map->skb = skb;
+	map->mapping = mapping;
+
+	if (src_map != NULL)
+		src_map->skb = NULL;
+
+	ctrl = (DESC_CTRL_LEN & RX_PKT_BUF_SZ);
+	if (dest_idx == (B44_RX_RING_SIZE - 1))
+		ctrl |= DESC_CTRL_EOT;
+
+	dp = &bp->rx_ring[dest_idx];
+	dp->ctrl = cpu_to_le32(ctrl);
+	dp->addr = cpu_to_le32((u32) mapping + bp->dma_offset);
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
+			                    dest_idx * sizeof(*dp),
+			                    DMA_BIDIRECTIONAL);
+
+	return RX_PKT_BUF_SZ;
+}
+
+static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
+{
+	struct dma_desc *src_desc, *dest_desc;
+	struct ring_info *src_map, *dest_map;
+	struct rx_header *rh;
+	int dest_idx;
+	__le32 ctrl;
+
+	dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
+	dest_desc = &bp->rx_ring[dest_idx];
+	dest_map = &bp->rx_buffers[dest_idx];
+	src_desc = &bp->rx_ring[src_idx];
+	src_map = &bp->rx_buffers[src_idx];
+
+	dest_map->skb = src_map->skb;
+	rh = (struct rx_header *) src_map->skb->data;
+	rh->len = 0;
+	rh->flags = 0;
+	dest_map->mapping = src_map->mapping;
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		b44_sync_dma_desc_for_cpu(bp->sdev, bp->rx_ring_dma,
+			                 src_idx * sizeof(*src_desc),
+			                 DMA_BIDIRECTIONAL);
+
+	ctrl = src_desc->ctrl;
+	if (dest_idx == (B44_RX_RING_SIZE - 1))
+		ctrl |= cpu_to_le32(DESC_CTRL_EOT);
+	else
+		ctrl &= cpu_to_le32(~DESC_CTRL_EOT);
+
+	dest_desc->ctrl = ctrl;
+	dest_desc->addr = src_desc->addr;
+
+	src_map->skb = NULL;
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		b44_sync_dma_desc_for_device(bp->sdev, bp->rx_ring_dma,
+					     dest_idx * sizeof(*dest_desc),
+					     DMA_BIDIRECTIONAL);
+
+	dma_sync_single_for_device(bp->sdev->dma_dev, dest_map->mapping,
+				   RX_PKT_BUF_SZ,
+				   DMA_FROM_DEVICE);
+}
+
+static int b44_rx(struct b44 *bp, int budget)
+{
+	int received;
+	u32 cons, prod;
+
+	received = 0;
+	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
+	prod /= sizeof(struct dma_desc);
+	cons = bp->rx_cons;
+
+	while (cons != prod && budget > 0) {
+		struct ring_info *rp = &bp->rx_buffers[cons];
+		struct sk_buff *skb = rp->skb;
+		dma_addr_t map = rp->mapping;
+		struct rx_header *rh;
+		u16 len;
+
+		dma_sync_single_for_cpu(bp->sdev->dma_dev, map,
+					RX_PKT_BUF_SZ,
+					DMA_FROM_DEVICE);
+		rh = (struct rx_header *) skb->data;
+		len = le16_to_cpu(rh->len);
+		if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
+		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
+		drop_it:
+			b44_recycle_rx(bp, cons, bp->rx_prod);
+		drop_it_no_recycle:
+			bp->dev->stats.rx_dropped++;
+			goto next_pkt;
+		}
+
+		if (len == 0) {
+			int i = 0;
+
+			do {
+				udelay(2);
+				barrier();
+				len = le16_to_cpu(rh->len);
+			} while (len == 0 && i++ < 5);
+			if (len == 0)
+				goto drop_it;
+		}
+
+		/* Omit CRC. */
+		len -= 4;
+
+		if (!bp->force_copybreak && len > RX_COPY_THRESHOLD) {
+			int skb_size;
+			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
+			if (skb_size < 0)
+				goto drop_it;
+			dma_unmap_single(bp->sdev->dma_dev, map,
+					 skb_size, DMA_FROM_DEVICE);
+			/* Leave out rx_header */
+			skb_put(skb, len + RX_PKT_OFFSET);
+			skb_pull(skb, RX_PKT_OFFSET);
+		} else {
+			struct sk_buff *copy_skb;
+
+			b44_recycle_rx(bp, cons, bp->rx_prod);
+			copy_skb = netdev_alloc_skb(bp->dev, len + 2);
+			if (copy_skb == NULL)
+				goto drop_it_no_recycle;
+
+			skb_reserve(copy_skb, 2);
+			skb_put(copy_skb, len);
+			/* DMA sync done above, copy just the actual packet */
+			skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
+							 copy_skb->data, len);
+			skb = copy_skb;
+		}
+		skb_checksum_none_assert(skb);
+		skb->protocol = eth_type_trans(skb, bp->dev);
+		netif_receive_skb(skb);
+		received++;
+		budget--;
+	next_pkt:
+		bp->rx_prod = (bp->rx_prod + 1) &
+			(B44_RX_RING_SIZE - 1);
+		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
+	}
+
+	bp->rx_cons = cons;
+	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));
+
+	return received;
+}
+
+static int b44_poll(struct napi_struct *napi, int budget)
+{
+	struct b44 *bp = container_of(napi, struct b44, napi);
+	int work_done;
+	unsigned long flags;
+
+	spin_lock_irqsave(&bp->lock, flags);
+
+	if (bp->istat & (ISTAT_TX | ISTAT_TO)) {
+		/* spin_lock(&bp->tx_lock); */
+		b44_tx(bp);
+		/* spin_unlock(&bp->tx_lock); */
+	}
+	if (bp->istat & ISTAT_RFO) {	/* fast recovery, in ~20msec */
+		bp->istat &= ~ISTAT_RFO;
+		b44_disable_ints(bp);
+		ssb_device_enable(bp->sdev, 0); /* resets ISTAT_RFO */
+		b44_init_rings(bp);
+		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+		netif_wake_queue(bp->dev);
+	}
+
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	work_done = 0;
+	if (bp->istat & ISTAT_RX)
+		work_done += b44_rx(bp, budget);
+
+	if (bp->istat & ISTAT_ERRORS) {
+		spin_lock_irqsave(&bp->lock, flags);
+		b44_halt(bp);
+		b44_init_rings(bp);
+		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
+		netif_wake_queue(bp->dev);
+		spin_unlock_irqrestore(&bp->lock, flags);
+		work_done = 0;
+	}
+
+	if (work_done < budget) {
+		napi_complete(napi);
+		b44_enable_ints(bp);
+	}
+
+	return work_done;
+}
+
+static irqreturn_t b44_interrupt(int irq, void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct b44 *bp = netdev_priv(dev);
+	u32 istat, imask;
+	int handled = 0;
+
+	spin_lock(&bp->lock);
+
+	istat = br32(bp, B44_ISTAT);
+	imask = br32(bp, B44_IMASK);
+
+	/* The interrupt mask register controls which interrupt bits will
+	 * actually raise an interrupt to the CPU when set by hw/firmware;
+	 * masked bits are still latched in ISTAT, so filter them out here.
+	 */
+	istat &= imask;
+	if (istat) {
+		handled = 1;
+
+		if (unlikely(!netif_running(dev))) {
+			netdev_info(dev, "late interrupt\n");
+			goto irq_ack;
+		}
+
+		if (napi_schedule_prep(&bp->napi)) {
+			/* NOTE: These writes are posted by the readback of
+			 *       the ISTAT register below.
+			 */
+			bp->istat = istat;
+			__b44_disable_ints(bp);
+			__napi_schedule(&bp->napi);
+		}
+
+irq_ack:
+		bw32(bp, B44_ISTAT, istat);
+		br32(bp, B44_ISTAT);
+	}
+	spin_unlock(&bp->lock);
+	return IRQ_RETVAL(handled);
+}
+
+static void b44_tx_timeout(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	netdev_err(dev, "transmit timed out, resetting\n");
+
+	spin_lock_irq(&bp->lock);
+
+	b44_halt(bp);
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	netif_wake_queue(dev);
+}
+
+static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	int rc = NETDEV_TX_OK;
+	dma_addr_t mapping;
+	u32 len, entry, ctrl;
+	unsigned long flags;
+
+	len = skb->len;
+	spin_lock_irqsave(&bp->lock, flags);
+
+	/* This is a hard error, log it. */
+	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
+		netif_stop_queue(dev);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
+		goto err_out;
+	}
+
+	mapping = dma_map_single(bp->sdev->dma_dev, skb->data, len, DMA_TO_DEVICE);
+	if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+		struct sk_buff *bounce_skb;
+
+		/* Chip can't handle DMA to/from >1GB, use bounce buffer */
+		if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+			dma_unmap_single(bp->sdev->dma_dev, mapping, len,
+					     DMA_TO_DEVICE);
+
+		bounce_skb = __netdev_alloc_skb(dev, len, GFP_ATOMIC | GFP_DMA);
+		if (!bounce_skb)
+			goto err_out;
+
+		mapping = dma_map_single(bp->sdev->dma_dev, bounce_skb->data,
+					 len, DMA_TO_DEVICE);
+		if (dma_mapping_error(bp->sdev->dma_dev, mapping) || mapping + len > DMA_BIT_MASK(30)) {
+			if (!dma_mapping_error(bp->sdev->dma_dev, mapping))
+				dma_unmap_single(bp->sdev->dma_dev, mapping,
+						     len, DMA_TO_DEVICE);
+			dev_kfree_skb_any(bounce_skb);
+			goto err_out;
+		}
+
+		skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
+		dev_kfree_skb_any(skb);
+		skb = bounce_skb;
+	}
+
+	entry = bp->tx_prod;
+	bp->tx_buffers[entry].skb = skb;
+	bp->tx_buffers[entry].mapping = mapping;
+
+	ctrl  = (len & DESC_CTRL_LEN);
+	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
+	if (entry == (B44_TX_RING_SIZE - 1))
+		ctrl |= DESC_CTRL_EOT;
+
+	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
+	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping+bp->dma_offset);
+
+	if (bp->flags & B44_FLAG_TX_RING_HACK)
+		b44_sync_dma_desc_for_device(bp->sdev, bp->tx_ring_dma,
+			                    entry * sizeof(bp->tx_ring[0]),
+			                    DMA_TO_DEVICE);
+
+	entry = NEXT_TX(entry);
+
+	bp->tx_prod = entry;
+
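+	/* Make the descriptor and producer index updates visible to
+	 * the device before the doorbell write below.
+	 */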
+	wmb();
+
+	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
+		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
+	if (bp->flags & B44_FLAG_REORDER_BUG)
+		br32(bp, B44_DMATX_PTR);
+
+	if (TX_BUFFS_AVAIL(bp) < 1)
+		netif_stop_queue(dev);
+
+out_unlock:
+	spin_unlock_irqrestore(&bp->lock, flags);
+
+	return rc;
+
+err_out:
+	rc = NETDEV_TX_BUSY;
+	goto out_unlock;
+}
+
+static int b44_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	if (new_mtu < B44_MIN_MTU || new_mtu > B44_MAX_MTU)
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		/* We'll just catch it later when the
+		 * device is up'd.
+		 */
+		dev->mtu = new_mtu;
+		return 0;
+	}
+
+	spin_lock_irq(&bp->lock);
+	b44_halt(bp);
+	dev->mtu = new_mtu;
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	return 0;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  bp->lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void b44_free_rings(struct b44 *bp)
+{
+	struct ring_info *rp;
+	int i;
+
+	for (i = 0; i < B44_RX_RING_SIZE; i++) {
+		rp = &bp->rx_buffers[i];
+
+		if (rp->skb == NULL)
+			continue;
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, RX_PKT_BUF_SZ,
+				 DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rp->skb);
+		rp->skb = NULL;
+	}
+
+	/* XXX needs changes once NETIF_F_SG is set... */
+	for (i = 0; i < B44_TX_RING_SIZE; i++) {
+		rp = &bp->tx_buffers[i];
+
+		if (rp->skb == NULL)
+			continue;
+		dma_unmap_single(bp->sdev->dma_dev, rp->mapping, rp->skb->len,
+				 DMA_TO_DEVICE);
+		dev_kfree_skb_any(rp->skb);
+		rp->skb = NULL;
+	}
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.
+ */
+static void b44_init_rings(struct b44 *bp)
+{
+	int i;
+
+	b44_free_rings(bp);
+
+	memset(bp->rx_ring, 0, B44_RX_RING_BYTES);
+	memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+
+	if (bp->flags & B44_FLAG_RX_RING_HACK)
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->rx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+
+	if (bp->flags & B44_FLAG_TX_RING_HACK)
+		dma_sync_single_for_device(bp->sdev->dma_dev, bp->tx_ring_dma,
+					   DMA_TABLE_BYTES, DMA_TO_DEVICE);
+
+	for (i = 0; i < bp->rx_pending; i++) {
+		if (b44_alloc_rx_skb(bp, -1, i) < 0)
+			break;
+	}
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void b44_free_consistent(struct b44 *bp)
+{
+	kfree(bp->rx_buffers);
+	bp->rx_buffers = NULL;
+	kfree(bp->tx_buffers);
+	bp->tx_buffers = NULL;
+	if (bp->rx_ring) {
+		if (bp->flags & B44_FLAG_RX_RING_HACK) {
+			dma_unmap_single(bp->sdev->dma_dev, bp->rx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_BIDIRECTIONAL);
+			kfree(bp->rx_ring);
+		} else
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->rx_ring, bp->rx_ring_dma);
+		bp->rx_ring = NULL;
+		bp->flags &= ~B44_FLAG_RX_RING_HACK;
+	}
+	if (bp->tx_ring) {
+		if (bp->flags & B44_FLAG_TX_RING_HACK) {
+			dma_unmap_single(bp->sdev->dma_dev, bp->tx_ring_dma,
+					 DMA_TABLE_BYTES, DMA_TO_DEVICE);
+			kfree(bp->tx_ring);
+		} else
+			dma_free_coherent(bp->sdev->dma_dev, DMA_TABLE_BYTES,
+					  bp->tx_ring, bp->tx_ring_dma);
+		bp->tx_ring = NULL;
+		bp->flags &= ~B44_FLAG_TX_RING_HACK;
+	}
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.  Can sleep.
+ */
+static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
+{
+	int size;
+
+	size  = B44_RX_RING_SIZE * sizeof(struct ring_info);
+	bp->rx_buffers = kzalloc(size, gfp);
+	if (!bp->rx_buffers)
+		goto out_err;
+
+	size = B44_TX_RING_SIZE * sizeof(struct ring_info);
+	bp->tx_buffers = kzalloc(size, gfp);
+	if (!bp->tx_buffers)
+		goto out_err;
+
+	size = DMA_TABLE_BYTES;
+	bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->rx_ring_dma, gfp);
+	if (!bp->rx_ring) {
+		/* Allocation may have failed due to pci_alloc_consistent
+		   insisting on use of GFP_DMA, which is more restrictive
+		   than necessary...  */
+		struct dma_desc *rx_ring;
+		dma_addr_t rx_ring_dma;
+
+		rx_ring = kzalloc(size, gfp);
+		if (!rx_ring)
+			goto out_err;
+
+		rx_ring_dma = dma_map_single(bp->sdev->dma_dev, rx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_BIDIRECTIONAL);
+
+		if (dma_mapping_error(bp->sdev->dma_dev, rx_ring_dma) ||
+			rx_ring_dma + size > DMA_BIT_MASK(30)) {
+			kfree(rx_ring);
+			goto out_err;
+		}
+
+		bp->rx_ring = rx_ring;
+		bp->rx_ring_dma = rx_ring_dma;
+		bp->flags |= B44_FLAG_RX_RING_HACK;
+	}
+
+	bp->tx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
+					 &bp->tx_ring_dma, gfp);
+	if (!bp->tx_ring) {
+		/* Allocation may have failed due to ssb_dma_alloc_consistent
+		   insisting on use of GFP_DMA, which is more restrictive
+		   than necessary...  */
+		struct dma_desc *tx_ring;
+		dma_addr_t tx_ring_dma;
+
+		tx_ring = kzalloc(size, gfp);
+		if (!tx_ring)
+			goto out_err;
+
+		tx_ring_dma = dma_map_single(bp->sdev->dma_dev, tx_ring,
+					     DMA_TABLE_BYTES,
+					     DMA_TO_DEVICE);
+
+		if (dma_mapping_error(bp->sdev->dma_dev, tx_ring_dma) ||
+			tx_ring_dma + size > DMA_BIT_MASK(30)) {
+			kfree(tx_ring);
+			goto out_err;
+		}
+
+		bp->tx_ring = tx_ring;
+		bp->tx_ring_dma = tx_ring_dma;
+		bp->flags |= B44_FLAG_TX_RING_HACK;
+	}
+
+	return 0;
+
+out_err:
+	b44_free_consistent(bp);
+	return -ENOMEM;
+}
+
+/* bp->lock is held. */
+static void b44_clear_stats(struct b44 *bp)
+{
+	unsigned long reg;
+
+	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
+		br32(bp, reg);
+	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
+		br32(bp, reg);
+}
+
+/* bp->lock is held. */
+static void b44_chip_reset(struct b44 *bp, int reset_kind)
+{
+	struct ssb_device *sdev = bp->sdev;
+	bool was_enabled;
+
+	was_enabled = ssb_device_is_enabled(bp->sdev);
+
+	ssb_device_enable(bp->sdev, 0);
+	ssb_pcicore_dev_irqvecs_enable(&sdev->bus->pcicore, sdev);
+
+	if (was_enabled) {
+		bw32(bp, B44_RCV_LAZY, 0);
+		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
+		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);
+		bw32(bp, B44_DMATX_CTRL, 0);
+		bp->tx_prod = bp->tx_cons = 0;
+		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
+			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
+				     100, 0);
+		}
+		bw32(bp, B44_DMARX_CTRL, 0);
+		bp->rx_prod = bp->rx_cons = 0;
+	}
+
+	b44_clear_stats(bp);
+
+	/*
+	 * Don't enable PHY if we are doing a partial reset
+	 * we are probably going to power down
+	 */
+	if (reset_kind == B44_CHIP_RESET_PARTIAL)
+		return;
+
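+	/* Program the MDIO clock: native SSB derives the divider from
+	 * the bus clock via B44_MDC_RATIO; PCI hosts use a fixed 0x0d.
+	 */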
+	switch (sdev->bus->bustype) {
+	case SSB_BUSTYPE_SSB:
+		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+		     (DIV_ROUND_CLOSEST(ssb_clockspeed(sdev->bus),
+					B44_MDC_RATIO)
+		     & MDIO_CTRL_MAXF_MASK)));
+		break;
+	case SSB_BUSTYPE_PCI:
+		bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
+		     (0x0d & MDIO_CTRL_MAXF_MASK)));
+		break;
+	case SSB_BUSTYPE_PCMCIA:
+	case SSB_BUSTYPE_SDIO:
+		WARN_ON(1); /* A device with this bus does not exist. */
+		break;
+	}
+
+	br32(bp, B44_MDIO_CTRL);
+
+	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
+		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
+		br32(bp, B44_ENET_CTRL);
+		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
+	} else {
+		u32 val = br32(bp, B44_DEVCTRL);
+
+		if (val & DEVCTRL_EPR) {
+			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
+			br32(bp, B44_DEVCTRL);
+			udelay(100);
+		}
+		bp->flags |= B44_FLAG_INTERNAL_PHY;
+	}
+}
+
+/* bp->lock is held. */
+static void b44_halt(struct b44 *bp)
+{
+	b44_disable_ints(bp);
+	/* reset PHY */
+	b44_phy_reset(bp);
+	/* power down PHY */
+	netdev_info(bp->dev, "powering down PHY\n");
+	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
+	/* now reset the chip, but without enabling the MAC&PHY
+	 * part of it. This has to be done _after_ we shut down the PHY */
+	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+}
+
+/* bp->lock is held. */
+static void __b44_set_mac_addr(struct b44 *bp)
+{
+	bw32(bp, B44_CAM_CTRL, 0);
+	if (!(bp->dev->flags & IFF_PROMISC)) {
+		u32 val;
+
+		__b44_cam_write(bp, bp->dev->dev_addr, 0);
+		val = br32(bp, B44_CAM_CTRL);
+		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+	}
+}
+
+static int b44_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	u32 val;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	spin_lock_irq(&bp->lock);
+
+	val = br32(bp, B44_RXCONFIG);
+	if (!(val & RXCONFIG_CAM_ABSENT))
+		__b44_set_mac_addr(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing.  Invoked with bp->lock held.
+ */
+static void __b44_set_rx_mode(struct net_device *);
+static void b44_init_hw(struct b44 *bp, int reset_kind)
+{
+	u32 val;
+
+	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+	if (reset_kind == B44_FULL_RESET) {
+		b44_phy_reset(bp);
+		b44_setup_phy(bp);
+	}
+
+	/* Enable CRC32, set proper LED modes and power on PHY */
+	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
+	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));
+
+	/* This sets the MAC address too.  */
+	__b44_set_rx_mode(bp->dev);
+
+	/* MTU + eth header + possible VLAN tag + struct rx_header */
+	bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+	bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
+
+	bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
+	if (reset_kind == B44_PARTIAL_RESET) {
+		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
+	} else {
+		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
+		bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
+		bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
+				      (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
+		bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
+
+		bw32(bp, B44_DMARX_PTR, bp->rx_pending);
+		bp->rx_prod = bp->rx_pending;
+
+		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
+	}
+
+	val = br32(bp, B44_ENET_CTRL);
+	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
+}
+
+static int b44_open(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	int err;
+
+	err = b44_alloc_consistent(bp, GFP_KERNEL);
+	if (err)
+		goto out;
+
+	napi_enable(&bp->napi);
+
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+
+	b44_check_phy(bp);
+
+	err = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+	if (unlikely(err < 0)) {
+		napi_disable(&bp->napi);
+		b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
+		b44_free_rings(bp);
+		b44_free_consistent(bp);
+		goto out;
+	}
+
+	init_timer(&bp->timer);
+	bp->timer.expires = jiffies + HZ;
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = b44_timer;
+	add_timer(&bp->timer);
+
+	b44_enable_ints(bp);
+	netif_start_queue(dev);
+out:
+	return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void b44_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	b44_interrupt(dev->irq, dev);
+	enable_irq(dev->irq);
+}
+#endif
+
+static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
+{
+	u32 i;
+	u32 *pattern = (u32 *) pp;
+
+	for (i = 0; i < bytes; i += sizeof(u32)) {
+		bw32(bp, B44_FILT_ADDR, table_offset + i);
+		bw32(bp, B44_FILT_DATA, pattern[i / sizeof(u32)]);
+	}
+}
+
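+/*
+ * Build one wake-on-LAN pattern at @offset: a magic packet is six 0xff
+ * sync bytes followed by up to sixteen repetitions of the MAC address,
+ * and the mask gets one bit set per byte the hardware must match.
+ * Returns the index of the last pattern byte, i.e. the length-minus-one
+ * encoding that the B44_WKUP_LEN register expects.
+ */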
+static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+{
+	int magicsync = 6;
+	int k, j, len = offset;
+	int ethaddr_bytes = ETH_ALEN;
+
+	memset(ppattern + offset, 0xff, magicsync);
+	for (j = 0; j < magicsync; j++)
+		set_bit(len++, (unsigned long *) pmask);
+
+	for (j = 0; j < B44_MAX_PATTERNS; j++) {
+		if ((B44_PATTERN_SIZE - len) >= ETH_ALEN)
+			ethaddr_bytes = ETH_ALEN;
+		else
+			ethaddr_bytes = B44_PATTERN_SIZE - len;
+		if (ethaddr_bytes <= 0)
+			break;
+		for (k = 0; k < ethaddr_bytes; k++) {
+			ppattern[offset + magicsync +
+				(j * ETH_ALEN) + k] = macaddr[k];
+			set_bit(len++, (unsigned long *) pmask);
+		}
+	}
+	return len - 1;
+}
+
+/* Setup magic packet patterns in the b44 WOL
+ * pattern matching filter.
+ */
+static void b44_setup_pseudo_magicp(struct b44 *bp)
+{
+
+	u32 val;
+	int plen0, plen1, plen2;
+	u8 *pwol_pattern;
+	u8 pwol_mask[B44_PMASK_SIZE];
+
+	pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
+	if (!pwol_pattern) {
+		pr_err("Memory not available for WOL\n");
+		return;
+	}
+
+	/* Ipv4 magic packet pattern - pattern 0.*/
+	memset(pwol_mask, 0, B44_PMASK_SIZE);
+	plen0 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+				  B44_ETHIPV4UDP_HLEN);
+
+	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE, B44_PATTERN_BASE);
+	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE, B44_PMASK_BASE);
+
+	/* Raw ethernet II magic packet pattern - pattern 1 */
+	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+	memset(pwol_mask, 0, B44_PMASK_SIZE);
+	plen1 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+				  ETH_HLEN);
+
+	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+		       B44_PATTERN_BASE + B44_PATTERN_SIZE);
+	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+		       B44_PMASK_BASE + B44_PMASK_SIZE);
+
+	/* Ipv6 magic packet pattern - pattern 2 */
+	memset(pwol_pattern, 0, B44_PATTERN_SIZE);
+	memset(pwol_mask, 0, B44_PMASK_SIZE);
+	plen2 = b44_magic_pattern(bp->dev->dev_addr, pwol_pattern, pwol_mask,
+				  B44_ETHIPV6UDP_HLEN);
+
+	bwfilter_table(bp, pwol_pattern, B44_PATTERN_SIZE,
+		       B44_PATTERN_BASE + B44_PATTERN_SIZE + B44_PATTERN_SIZE);
+	bwfilter_table(bp, pwol_mask, B44_PMASK_SIZE,
+		       B44_PMASK_BASE + B44_PMASK_SIZE + B44_PMASK_SIZE);
+
+	kfree(pwol_pattern);
+
+	/* set these patterns' lengths: one less than each real length */
+	val = plen0 | (plen1 << 8) | (plen2 << 16) | WKUP_LEN_ENABLE_THREE;
+	bw32(bp, B44_WKUP_LEN, val);
+
+	/* enable wakeup pattern matching */
+	val = br32(bp, B44_DEVCTRL);
+	bw32(bp, B44_DEVCTRL, val | DEVCTRL_PFE);
+
+}
+
+#ifdef CONFIG_B44_PCI
+static void b44_setup_wol_pci(struct b44 *bp)
+{
+	u16 val;
+
+	if (bp->sdev->bus->bustype != SSB_BUSTYPE_SSB) {
+		bw32(bp, SSB_TMSLOW, br32(bp, SSB_TMSLOW) | SSB_TMSLOW_PE);
+		pci_read_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, &val);
+		pci_write_config_word(bp->sdev->bus->host_pci, SSB_PMCSR, val | SSB_PE);
+	}
+}
+#else
+static inline void b44_setup_wol_pci(struct b44 *bp) { }
+#endif /* CONFIG_B44_PCI */
+
+static void b44_setup_wol(struct b44 *bp)
+{
+	u32 val;
+
+	bw32(bp, B44_RXCONFIG, RXCONFIG_ALLMULTI);
+
+	if (bp->flags & B44_FLAG_B0_ANDLATER) {
+
+		bw32(bp, B44_WKUP_LEN, WKUP_LEN_DISABLE);
+
+		val = bp->dev->dev_addr[2] << 24 |
+			bp->dev->dev_addr[3] << 16 |
+			bp->dev->dev_addr[4] << 8 |
+			bp->dev->dev_addr[5];
+		bw32(bp, B44_ADDR_LO, val);
+
+		val = bp->dev->dev_addr[0] << 8 |
+			bp->dev->dev_addr[1];
+		bw32(bp, B44_ADDR_HI, val);
+
+		val = br32(bp, B44_DEVCTRL);
+		bw32(bp, B44_DEVCTRL, val | DEVCTRL_MPM | DEVCTRL_PFE);
+
+	} else {
+		b44_setup_pseudo_magicp(bp);
+	}
+	b44_setup_wol_pci(bp);
+}
+
+static int b44_close(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	netif_stop_queue(dev);
+
+	napi_disable(&bp->napi);
+
+	del_timer_sync(&bp->timer);
+
+	spin_lock_irq(&bp->lock);
+
+	b44_halt(bp);
+	b44_free_rings(bp);
+	netif_carrier_off(dev);
+
+	spin_unlock_irq(&bp->lock);
+
+	free_irq(dev->irq, dev);
+
+	if (bp->flags & B44_FLAG_WOL_ENABLE) {
+		b44_init_hw(bp, B44_PARTIAL_RESET);
+		b44_setup_wol(bp);
+	}
+
+	b44_free_consistent(bp);
+
+	return 0;
+}
+
+static struct net_device_stats *b44_get_stats(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct net_device_stats *nstat = &dev->stats;
+	struct b44_hw_stats *hwstat = &bp->hw_stats;
+
+	/* Convert HW stats into netdevice stats. */
+	nstat->rx_packets = hwstat->rx_pkts;
+	nstat->tx_packets = hwstat->tx_pkts;
+	nstat->rx_bytes   = hwstat->rx_octets;
+	nstat->tx_bytes   = hwstat->tx_octets;
+	nstat->tx_errors  = (hwstat->tx_jabber_pkts +
+			     hwstat->tx_oversize_pkts +
+			     hwstat->tx_underruns +
+			     hwstat->tx_excessive_cols +
+			     hwstat->tx_late_cols);
+	nstat->multicast  = hwstat->tx_multicast_pkts;
+	nstat->collisions = hwstat->tx_total_cols;
+
+	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+				   hwstat->rx_undersize);
+	nstat->rx_over_errors   = hwstat->rx_missed_pkts;
+	nstat->rx_frame_errors  = hwstat->rx_align_errs;
+	nstat->rx_crc_errors    = hwstat->rx_crc_errs;
+	nstat->rx_errors        = (hwstat->rx_jabber_pkts +
+				   hwstat->rx_oversize_pkts +
+				   hwstat->rx_missed_pkts +
+				   hwstat->rx_crc_align_errs +
+				   hwstat->rx_undersize +
+				   hwstat->rx_crc_errs +
+				   hwstat->rx_align_errs +
+				   hwstat->rx_symbol_errs);
+
+	nstat->tx_aborted_errors = hwstat->tx_underruns;
+#if 0
+	/* Carrier lost counter seems to be broken for some devices */
+	nstat->tx_carrier_errors = hwstat->tx_carrier_lost;
+#endif
+
+	return nstat;
+}
+
+static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
+{
+	struct netdev_hw_addr *ha;
+	int i, num_ents;
+
+	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
+	i = 0;
+	netdev_for_each_mc_addr(ha, dev) {
+		if (i == num_ents)
+			break;
+		__b44_cam_write(bp, ha->addr, i++ + 1);
+	}
+	return i + 1;
+}
+
+static void __b44_set_rx_mode(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	u32 val;
+
+	val = br32(bp, B44_RXCONFIG);
+	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
+	if ((dev->flags & IFF_PROMISC) || (val & RXCONFIG_CAM_ABSENT)) {
+		val |= RXCONFIG_PROMISC;
+		bw32(bp, B44_RXCONFIG, val);
+	} else {
+		unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
+		int i = 1;
+
+		__b44_set_mac_addr(bp);
+
+		if ((dev->flags & IFF_ALLMULTI) ||
+		    (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
+			val |= RXCONFIG_ALLMULTI;
+		else
+			i = __b44_load_mcast(bp, dev);
+
+		for (; i < 64; i++)
+			__b44_cam_write(bp, zero, i);
+
+		bw32(bp, B44_RXCONFIG, val);
+		val = br32(bp, B44_CAM_CTRL);
+		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
+	}
+}
+
+static void b44_set_rx_mode(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	spin_lock_irq(&bp->lock);
+	__b44_set_rx_mode(dev);
+	spin_unlock_irq(&bp->lock);
+}
+
+static u32 b44_get_msglevel(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	return bp->msg_enable;
+}
+
+static void b44_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct b44 *bp = netdev_priv(dev);
+	bp->msg_enable = value;
+}
+
+static void b44_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct b44 *bp = netdev_priv(dev);
+	struct ssb_bus *bus = bp->sdev->bus;
+
+	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+	switch (bus->bustype) {
+	case SSB_BUSTYPE_PCI:
+		strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+		break;
+	case SSB_BUSTYPE_SSB:
+		strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+		break;
+	case SSB_BUSTYPE_PCMCIA:
+	case SSB_BUSTYPE_SDIO:
+		WARN_ON(1); /* A device with this bus does not exist. */
+		break;
+	}
+}
+
+static int b44_nway_reset(struct net_device *dev)
+{
+	struct b44 *bp = netdev_priv(dev);
+	u32 bmcr;
+	int r;
+
+	spin_lock_irq(&bp->lock);
+	b44_readphy(bp, MII_BMCR, &bmcr);
+	b44_readphy(bp, MII_BMCR, &bmcr);
+	r = -EINVAL;
+	if (bmcr & BMCR_ANENABLE) {
+		b44_writephy(bp, MII_BMCR,
+			     bmcr | BMCR_ANRESTART);
+		r = 0;
+	}
+	spin_unlock_irq(&bp->lock);
+
+	return r;
+}
+
+static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	cmd->supported = (SUPPORTED_Autoneg);
+	cmd->supported |= (SUPPORTED_100baseT_Half |
+			  SUPPORTED_100baseT_Full |
+			  SUPPORTED_10baseT_Half |
+			  SUPPORTED_10baseT_Full |
+			  SUPPORTED_MII);
+
+	cmd->advertising = 0;
+	if (bp->flags & B44_FLAG_ADV_10HALF)
+		cmd->advertising |= ADVERTISED_10baseT_Half;
+	if (bp->flags & B44_FLAG_ADV_10FULL)
+		cmd->advertising |= ADVERTISED_10baseT_Full;
+	if (bp->flags & B44_FLAG_ADV_100HALF)
+		cmd->advertising |= ADVERTISED_100baseT_Half;
+	if (bp->flags & B44_FLAG_ADV_100FULL)
+		cmd->advertising |= ADVERTISED_100baseT_Full;
+	cmd->advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;
+	ethtool_cmd_speed_set(cmd, ((bp->flags & B44_FLAG_100_BASE_T) ?
+				    SPEED_100 : SPEED_10));
+	cmd->duplex = (bp->flags & B44_FLAG_FULL_DUPLEX) ?
+		DUPLEX_FULL : DUPLEX_HALF;
+	cmd->port = 0;
+	cmd->phy_address = bp->phy_addr;
+	cmd->transceiver = (bp->flags & B44_FLAG_INTERNAL_PHY) ?
+		XCVR_INTERNAL : XCVR_EXTERNAL;
+	cmd->autoneg = (bp->flags & B44_FLAG_FORCE_LINK) ?
+		AUTONEG_DISABLE : AUTONEG_ENABLE;
+	if (cmd->autoneg == AUTONEG_ENABLE)
+		cmd->advertising |= ADVERTISED_Autoneg;
+	if (!netif_running(dev)) {
+		ethtool_cmd_speed_set(cmd, 0);
+		cmd->duplex = 0xff;
+	}
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct b44 *bp = netdev_priv(dev);
+	u32 speed = ethtool_cmd_speed(cmd);
+
+	/* We do not support gigabit. */
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		if (cmd->advertising &
+		    (ADVERTISED_1000baseT_Half |
+		     ADVERTISED_1000baseT_Full))
+			return -EINVAL;
+	} else if ((speed != SPEED_100 &&
+		    speed != SPEED_10) ||
+		   (cmd->duplex != DUPLEX_HALF &&
+		    cmd->duplex != DUPLEX_FULL)) {
+			return -EINVAL;
+	}
+
+	spin_lock_irq(&bp->lock);
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		bp->flags &= ~(B44_FLAG_FORCE_LINK |
+			       B44_FLAG_100_BASE_T |
+			       B44_FLAG_FULL_DUPLEX |
+			       B44_FLAG_ADV_10HALF |
+			       B44_FLAG_ADV_10FULL |
+			       B44_FLAG_ADV_100HALF |
+			       B44_FLAG_ADV_100FULL);
+		if (cmd->advertising == 0) {
+			bp->flags |= (B44_FLAG_ADV_10HALF |
+				      B44_FLAG_ADV_10FULL |
+				      B44_FLAG_ADV_100HALF |
+				      B44_FLAG_ADV_100FULL);
+		} else {
+			if (cmd->advertising & ADVERTISED_10baseT_Half)
+				bp->flags |= B44_FLAG_ADV_10HALF;
+			if (cmd->advertising & ADVERTISED_10baseT_Full)
+				bp->flags |= B44_FLAG_ADV_10FULL;
+			if (cmd->advertising & ADVERTISED_100baseT_Half)
+				bp->flags |= B44_FLAG_ADV_100HALF;
+			if (cmd->advertising & ADVERTISED_100baseT_Full)
+				bp->flags |= B44_FLAG_ADV_100FULL;
+		}
+	} else {
+		bp->flags |= B44_FLAG_FORCE_LINK;
+		bp->flags &= ~(B44_FLAG_100_BASE_T | B44_FLAG_FULL_DUPLEX);
+		if (speed == SPEED_100)
+			bp->flags |= B44_FLAG_100_BASE_T;
+		if (cmd->duplex == DUPLEX_FULL)
+			bp->flags |= B44_FLAG_FULL_DUPLEX;
+	}
+
+	if (netif_running(dev))
+		b44_setup_phy(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	return 0;
+}
+
+static void b44_get_ringparam(struct net_device *dev,
+			      struct ethtool_ringparam *ering)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = B44_RX_RING_SIZE - 1;
+	ering->rx_pending = bp->rx_pending;
+
+	/* XXX ethtool lacks a tx_max_pending, oops... */
+}
+
+static int b44_set_ringparam(struct net_device *dev,
+			     struct ethtool_ringparam *ering)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	if ((ering->rx_pending > B44_RX_RING_SIZE - 1) ||
+	    (ering->rx_mini_pending != 0) ||
+	    (ering->rx_jumbo_pending != 0) ||
+	    (ering->tx_pending > B44_TX_RING_SIZE - 1))
+		return -EINVAL;
+
+	spin_lock_irq(&bp->lock);
+
+	bp->rx_pending = ering->rx_pending;
+	bp->tx_pending = ering->tx_pending;
+
+	b44_halt(bp);
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+	netif_wake_queue(bp->dev);
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	return 0;
+}
+
+static void b44_get_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	epause->autoneg =
+		(bp->flags & B44_FLAG_PAUSE_AUTO) != 0;
+	epause->rx_pause =
+		(bp->flags & B44_FLAG_RX_PAUSE) != 0;
+	epause->tx_pause =
+		(bp->flags & B44_FLAG_TX_PAUSE) != 0;
+}
+
+static int b44_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	spin_lock_irq(&bp->lock);
+	if (epause->autoneg)
+		bp->flags |= B44_FLAG_PAUSE_AUTO;
+	else
+		bp->flags &= ~B44_FLAG_PAUSE_AUTO;
+	if (epause->rx_pause)
+		bp->flags |= B44_FLAG_RX_PAUSE;
+	else
+		bp->flags &= ~B44_FLAG_RX_PAUSE;
+	if (epause->tx_pause)
+		bp->flags |= B44_FLAG_TX_PAUSE;
+	else
+		bp->flags &= ~B44_FLAG_TX_PAUSE;
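+	/* Autonegotiated pause requires restarting the chip so it can
+	 * be renegotiated; a forced setting is programmed directly.
+	 */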
+	if (bp->flags & B44_FLAG_PAUSE_AUTO) {
+		b44_halt(bp);
+		b44_init_rings(bp);
+		b44_init_hw(bp, B44_FULL_RESET);
+	} else {
+		__b44_set_flow_ctrl(bp, bp->flags);
+	}
+	spin_unlock_irq(&bp->lock);
+
+	b44_enable_ints(bp);
+
+	return 0;
+}
+
+static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
+		break;
+	}
+}
+
+static int b44_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(b44_gstrings);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void b44_get_ethtool_stats(struct net_device *dev,
+				  struct ethtool_stats *stats, u64 *data)
+{
+	struct b44 *bp = netdev_priv(dev);
+	u32 *val = &bp->hw_stats.tx_good_octets;
+	u32 i;
+
+	spin_lock_irq(&bp->lock);
+
+	b44_stats_update(bp);
+
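+	/* struct b44_hw_stats is a flat block of u32 counters laid out
+	 * in the same order as b44_gstrings, so it can be walked as an
+	 * array.
+	 */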
+	for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
+		*data++ = *val++;
+
+	spin_unlock_irq(&bp->lock);
+}
+
+static void b44_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	wol->supported = WAKE_MAGIC;
+	if (bp->flags & B44_FLAG_WOL_ENABLE)
+		wol->wolopts = WAKE_MAGIC;
+	else
+		wol->wolopts = 0;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int b44_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct b44 *bp = netdev_priv(dev);
+
+	spin_lock_irq(&bp->lock);
+	if (wol->wolopts & WAKE_MAGIC)
+		bp->flags |= B44_FLAG_WOL_ENABLE;
+	else
+		bp->flags &= ~B44_FLAG_WOL_ENABLE;
+	spin_unlock_irq(&bp->lock);
+
+	return 0;
+}
+
+static const struct ethtool_ops b44_ethtool_ops = {
+	.get_drvinfo		= b44_get_drvinfo,
+	.get_settings		= b44_get_settings,
+	.set_settings		= b44_set_settings,
+	.nway_reset		= b44_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_wol		= b44_get_wol,
+	.set_wol		= b44_set_wol,
+	.get_ringparam		= b44_get_ringparam,
+	.set_ringparam		= b44_set_ringparam,
+	.get_pauseparam		= b44_get_pauseparam,
+	.set_pauseparam		= b44_set_pauseparam,
+	.get_msglevel		= b44_get_msglevel,
+	.set_msglevel		= b44_set_msglevel,
+	.get_strings		= b44_get_strings,
+	.get_sset_count		= b44_get_sset_count,
+	.get_ethtool_stats	= b44_get_ethtool_stats,
+};
+
+static int b44_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct b44 *bp = netdev_priv(dev);
+	int err = -EINVAL;
+
+	if (!netif_running(dev))
+		goto out;
+
+	spin_lock_irq(&bp->lock);
+	err = generic_mii_ioctl(&bp->mii_if, data, cmd, NULL);
+	spin_unlock_irq(&bp->lock);
+out:
+	return err;
+}
+
+static int __devinit b44_get_invariants(struct b44 *bp)
+{
+	struct ssb_device *sdev = bp->sdev;
+	int err = 0;
+	u8 *addr;
+
+	bp->dma_offset = ssb_dma_translation(sdev);
+
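+	/* On a native SSB bus, the second and later instances use the
+	 * second set of SPROM entries (et1mac/et1phyaddr).
+	 */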
+	if (sdev->bus->bustype == SSB_BUSTYPE_SSB &&
+	    instance > 1) {
+		addr = sdev->bus->sprom.et1mac;
+		bp->phy_addr = sdev->bus->sprom.et1phyaddr;
+	} else {
+		addr = sdev->bus->sprom.et0mac;
+		bp->phy_addr = sdev->bus->sprom.et0phyaddr;
+	}
+	/* Some ROMs have buggy PHY addresses with the high
+	 * bits set (sign extension?). Truncate them to a
+	 * valid PHY address. */
+	bp->phy_addr &= 0x1F;
+
+	memcpy(bp->dev->dev_addr, addr, 6);
+
+	if (!is_valid_ether_addr(&bp->dev->dev_addr[0])) {
+		pr_err("Invalid MAC address found in EEPROM\n");
+		return -EINVAL;
+	}
+
+	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, bp->dev->addr_len);
+
+	bp->imask = IMASK_DEF;
+
+	/* XXX - really required?
+	   bp->flags |= B44_FLAG_BUGGY_TXPTR;
+	*/
+
+	if (bp->sdev->id.revision >= 7)
+		bp->flags |= B44_FLAG_B0_ANDLATER;
+
+	return err;
+}
+
+static const struct net_device_ops b44_netdev_ops = {
+	.ndo_open		= b44_open,
+	.ndo_stop		= b44_close,
+	.ndo_start_xmit		= b44_start_xmit,
+	.ndo_get_stats		= b44_get_stats,
+	.ndo_set_multicast_list = b44_set_rx_mode,
+	.ndo_set_mac_address	= b44_set_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= b44_ioctl,
+	.ndo_tx_timeout		= b44_tx_timeout,
+	.ndo_change_mtu		= b44_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= b44_poll_controller,
+#endif
+};
+
+static int __devinit b44_init_one(struct ssb_device *sdev,
+				  const struct ssb_device_id *ent)
+{
+	struct net_device *dev;
+	struct b44 *bp;
+	int err;
+
+	instance++;
+
+	pr_info_once("%s version %s\n", DRV_DESCRIPTION, DRV_MODULE_VERSION);
+
+	dev = alloc_etherdev(sizeof(*bp));
+	if (!dev) {
+		dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	SET_NETDEV_DEV(dev, sdev->dev);
+
+	/* No interesting netdevice features in this card... */
+	dev->features |= 0;
+
+	bp = netdev_priv(dev);
+	bp->sdev = sdev;
+	bp->dev = dev;
+	bp->force_copybreak = 0;
+
+	bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
+
+	spin_lock_init(&bp->lock);
+
+	bp->rx_pending = B44_DEF_RX_RING_PENDING;
+	bp->tx_pending = B44_DEF_TX_RING_PENDING;
+
+	dev->netdev_ops = &b44_netdev_ops;
+	netif_napi_add(dev, &bp->napi, b44_poll, 64);
+	dev->watchdog_timeo = B44_TX_TIMEOUT;
+	dev->irq = sdev->irq;
+	SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+
+	err = ssb_bus_powerup(sdev->bus, 0);
+	if (err) {
+		dev_err(sdev->dev,
+			"Failed to powerup the bus\n");
+		goto err_out_free_dev;
+	}
+
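+	/* The 4400 DMA engine can only address the low 1GB of memory,
+	 * hence the 30-bit mask.
+	 */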
+	if (dma_set_mask(sdev->dma_dev, DMA_BIT_MASK(30)) ||
+	    dma_set_coherent_mask(sdev->dma_dev, DMA_BIT_MASK(30))) {
+		dev_err(sdev->dev,
+			"Required 30BIT DMA mask unsupported by the system\n");
+		goto err_out_powerdown;
+	}
+
+	err = b44_get_invariants(bp);
+	if (err) {
+		dev_err(sdev->dev,
+			"Problem fetching invariants of chip, aborting\n");
+		goto err_out_powerdown;
+	}
+
+	bp->mii_if.dev = dev;
+	bp->mii_if.mdio_read = b44_mii_read;
+	bp->mii_if.mdio_write = b44_mii_write;
+	bp->mii_if.phy_id = bp->phy_addr;
+	bp->mii_if.phy_id_mask = 0x1f;
+	bp->mii_if.reg_num_mask = 0x1f;
+
+	/* By default, advertise all speed/duplex settings. */
+	bp->flags |= (B44_FLAG_ADV_10HALF | B44_FLAG_ADV_10FULL |
+		      B44_FLAG_ADV_100HALF | B44_FLAG_ADV_100FULL);
+
+	/* By default, auto-negotiate PAUSE. */
+	bp->flags |= B44_FLAG_PAUSE_AUTO;
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(sdev->dev, "Cannot register net device, aborting\n");
+		goto err_out_powerdown;
+	}
+
+	netif_carrier_off(dev);
+
+	ssb_set_drvdata(sdev, dev);
+
+	/* Chip reset provides power to the b44 MAC & PCI cores, which
+	 * is necessary for MAC register access.
+	 */
+	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
+
+	/* do a phy reset to test if there is an active phy */
+	if (b44_phy_reset(bp) < 0)
+		bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+
+	netdev_info(dev, "%s %pM\n", DRV_DESCRIPTION, dev->dev_addr);
+
+	return 0;
+
+err_out_powerdown:
+	ssb_bus_may_powerdown(sdev->bus);
+
+err_out_free_dev:
+	free_netdev(dev);
+
+out:
+	return err;
+}
+
+static void __devexit b44_remove_one(struct ssb_device *sdev)
+{
+	struct net_device *dev = ssb_get_drvdata(sdev);
+
+	unregister_netdev(dev);
+	ssb_device_disable(sdev, 0);
+	ssb_bus_may_powerdown(sdev->bus);
+	free_netdev(dev);
+	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
+	ssb_set_drvdata(sdev, NULL);
+}
+
+static int b44_suspend(struct ssb_device *sdev, pm_message_t state)
+{
+	struct net_device *dev = ssb_get_drvdata(sdev);
+	struct b44 *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	del_timer_sync(&bp->timer);
+
+	spin_lock_irq(&bp->lock);
+
+	b44_halt(bp);
+	netif_carrier_off(bp->dev);
+	netif_device_detach(bp->dev);
+	b44_free_rings(bp);
+
+	spin_unlock_irq(&bp->lock);
+
+	free_irq(dev->irq, dev);
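+	/* With wake-on-LAN enabled, leave the MAC partially initialized
+	 * so it can still receive the magic packet.
+	 */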
+	if (bp->flags & B44_FLAG_WOL_ENABLE) {
+		b44_init_hw(bp, B44_PARTIAL_RESET);
+		b44_setup_wol(bp);
+	}
+
+	ssb_pcihost_set_power_state(sdev, PCI_D3hot);
+	return 0;
+}
+
+static int b44_resume(struct ssb_device *sdev)
+{
+	struct net_device *dev = ssb_get_drvdata(sdev);
+	struct b44 *bp = netdev_priv(dev);
+	int rc = 0;
+
+	rc = ssb_bus_powerup(sdev->bus, 0);
+	if (rc) {
+		dev_err(sdev->dev,
+			"Failed to powerup the bus\n");
+		return rc;
+	}
+
+	if (!netif_running(dev))
+		return 0;
+
+	spin_lock_irq(&bp->lock);
+	b44_init_rings(bp);
+	b44_init_hw(bp, B44_FULL_RESET);
+	spin_unlock_irq(&bp->lock);
+
+	/*
+	 * As a shared interrupt, the handler can be called immediately. To be
+	 * able to check the interrupt status the hardware must already be
+	 * powered back on (b44_init_hw).
+	 */
+	rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+	if (rc) {
+		netdev_err(dev, "request_irq failed\n");
+		spin_lock_irq(&bp->lock);
+		b44_halt(bp);
+		b44_free_rings(bp);
+		spin_unlock_irq(&bp->lock);
+		return rc;
+	}
+
+	netif_device_attach(bp->dev);
+
+	b44_enable_ints(bp);
+	netif_wake_queue(dev);
+
+	mod_timer(&bp->timer, jiffies + 1);
+
+	return 0;
+}
+
+static struct ssb_driver b44_ssb_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= b44_ssb_tbl,
+	.probe		= b44_init_one,
+	.remove		= __devexit_p(b44_remove_one),
+	.suspend	= b44_suspend,
+	.resume		= b44_resume,
+};
+
+static inline int __init b44_pci_init(void)
+{
+	int err = 0;
+#ifdef CONFIG_B44_PCI
+	err = ssb_pcihost_register(&b44_pci_driver);
+#endif
+	return err;
+}
+
+static inline void __exit b44_pci_exit(void)
+{
+#ifdef CONFIG_B44_PCI
+	ssb_pcihost_unregister(&b44_pci_driver);
+#endif
+}
+
+static int __init b44_init(void)
+{
+	unsigned int dma_desc_align_size = dma_get_cache_alignment();
+	int err;
+
+	/* Set up parameters for syncing RX/TX DMA descriptors */
+	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));
+
+	err = b44_pci_init();
+	if (err)
+		return err;
+	err = ssb_driver_register(&b44_ssb_driver);
+	if (err)
+		b44_pci_exit();
+	return err;
+}
+
+static void __exit b44_cleanup(void)
+{
+	ssb_driver_unregister(&b44_ssb_driver);
+	b44_pci_exit();
+}
+
+module_init(b44_init);
+module_exit(b44_cleanup);
+
diff --git a/drivers/net/ethernet/broadcom/b44.h b/drivers/net/ethernet/broadcom/b44.h
new file mode 100644
index 0000000..e1905a4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/b44.h
@@ -0,0 +1,401 @@
+#ifndef _B44_H
+#define _B44_H
+
+/* Register layout. (These correspond to struct _bcmenettregs in bcm4400.) */
+#define	B44_DEVCTRL	0x0000UL /* Device Control */
+#define  DEVCTRL_MPM		0x00000040 /* Magic Packet PME Enable (B0 only) */
+#define  DEVCTRL_PFE		0x00000080 /* Pattern Filtering Enable */
+#define  DEVCTRL_IPP		0x00000400 /* Internal EPHY Present */
+#define  DEVCTRL_EPR		0x00008000 /* EPHY Reset */
+#define  DEVCTRL_PME		0x00001000 /* PHY Mode Enable */
+#define  DEVCTRL_PMCE		0x00002000 /* PHY Mode Clocks Enable */
+#define  DEVCTRL_PADDR		0x0007c000 /* PHY Address */
+#define  DEVCTRL_PADDR_SHIFT	18
+#define B44_BIST_STAT	0x000CUL /* Built-In Self-Test Status */
+#define B44_WKUP_LEN	0x0010UL /* Wakeup Length */
+#define  WKUP_LEN_P0_MASK	0x0000007f /* Pattern 0 */
+#define  WKUP_LEN_D0		0x00000080
+#define  WKUP_LEN_P1_MASK	0x00007f00 /* Pattern 1 */
+#define  WKUP_LEN_P1_SHIFT	8
+#define  WKUP_LEN_D1		0x00008000
+#define  WKUP_LEN_P2_MASK	0x007f0000 /* Pattern 2 */
+#define  WKUP_LEN_P2_SHIFT	16
+#define  WKUP_LEN_D2		0x00000000
+#define  WKUP_LEN_P3_MASK	0x7f000000 /* Pattern 3 */
+#define  WKUP_LEN_P3_SHIFT	24
+#define  WKUP_LEN_D3		0x80000000
+#define  WKUP_LEN_DISABLE	0x80808080
+#define  WKUP_LEN_ENABLE_TWO	0x80800000
+#define  WKUP_LEN_ENABLE_THREE	0x80000000
+#define B44_ISTAT	0x0020UL /* Interrupt Status */
+#define  ISTAT_LS		0x00000020 /* Link Change (B0 only) */
+#define  ISTAT_PME		0x00000040 /* Power Management Event */
+#define  ISTAT_TO		0x00000080 /* General Purpose Timeout */
+#define  ISTAT_DSCE		0x00000400 /* Descriptor Error */
+#define  ISTAT_DATAE		0x00000800 /* Data Error */
+#define  ISTAT_DPE		0x00001000 /* Descr. Protocol Error */
+#define  ISTAT_RDU		0x00002000 /* Receive Descr. Underflow */
+#define  ISTAT_RFO		0x00004000 /* Receive FIFO Overflow */
+#define  ISTAT_TFU		0x00008000 /* Transmit FIFO Underflow */
+#define  ISTAT_RX		0x00010000 /* RX Interrupt */
+#define  ISTAT_TX		0x01000000 /* TX Interrupt */
+#define  ISTAT_EMAC		0x04000000 /* EMAC Interrupt */
+#define  ISTAT_MII_WRITE	0x08000000 /* MII Write Interrupt */
+#define  ISTAT_MII_READ		0x10000000 /* MII Read Interrupt */
+#define  ISTAT_ERRORS (ISTAT_DSCE|ISTAT_DATAE|ISTAT_DPE|ISTAT_RDU|ISTAT_RFO|ISTAT_TFU)
+#define B44_IMASK	0x0024UL /* Interrupt Mask */
+#define  IMASK_DEF		(ISTAT_ERRORS | ISTAT_TO | ISTAT_RX | ISTAT_TX)
+#define B44_GPTIMER	0x0028UL /* General Purpose Timer */
+#define B44_ADDR_LO	0x0088UL /* ENET Address Lo (B0 only) */
+#define B44_ADDR_HI	0x008CUL /* ENET Address Hi (B0 only) */
+#define B44_FILT_ADDR	0x0090UL /* ENET Filter Address */
+#define B44_FILT_DATA	0x0094UL /* ENET Filter Data */
+#define B44_TXBURST	0x00A0UL /* TX Max Burst Length */
+#define B44_RXBURST	0x00A4UL /* RX Max Burst Length */
+#define B44_MAC_CTRL	0x00A8UL /* MAC Control */
+#define  MAC_CTRL_CRC32_ENAB	0x00000001 /* CRC32 Generation Enable */
+#define  MAC_CTRL_PHY_PDOWN	0x00000004 /* Onchip EPHY Powerdown */
+#define  MAC_CTRL_PHY_EDET	0x00000008 /* Onchip EPHY Energy Detected */
+#define  MAC_CTRL_PHY_LEDCTRL	0x000000e0 /* Onchip EPHY LED Control */
+#define  MAC_CTRL_PHY_LEDCTRL_SHIFT 5
+#define B44_MAC_FLOW	0x00ACUL /* MAC Flow Control */
+#define  MAC_FLOW_RX_HI_WATER	0x000000ff /* Receive FIFO HI Water Mark */
+#define  MAC_FLOW_PAUSE_ENAB	0x00008000 /* Enable Pause Frame Generation */
+#define B44_RCV_LAZY	0x0100UL /* Lazy Interrupt Control */
+#define  RCV_LAZY_TO_MASK	0x00ffffff /* Timeout */
+#define  RCV_LAZY_FC_MASK	0xff000000 /* Frame Count */
+#define  RCV_LAZY_FC_SHIFT	24
+#define B44_DMATX_CTRL	0x0200UL /* DMA TX Control */
+#define  DMATX_CTRL_ENABLE	0x00000001 /* Enable */
+#define  DMATX_CTRL_SUSPEND	0x00000002 /* Suspend Request */
+#define  DMATX_CTRL_LPBACK	0x00000004 /* Loopback Enable */
+#define  DMATX_CTRL_FAIRPRIOR	0x00000008 /* Fair Priority */
+#define  DMATX_CTRL_FLUSH	0x00000010 /* Flush Request */
+#define B44_DMATX_ADDR	0x0204UL /* DMA TX Descriptor Ring Address */
+#define B44_DMATX_PTR	0x0208UL /* DMA TX Last Posted Descriptor */
+#define B44_DMATX_STAT	0x020CUL /* DMA TX Current Active Desc. + Status */
+#define  DMATX_STAT_CDMASK	0x00000fff /* Current Descriptor Mask */
+#define  DMATX_STAT_SMASK	0x0000f000 /* State Mask */
+#define  DMATX_STAT_SDISABLED	0x00000000 /* State Disabled */
+#define  DMATX_STAT_SACTIVE	0x00001000 /* State Active */
+#define  DMATX_STAT_SIDLE	0x00002000 /* State Idle Wait */
+#define  DMATX_STAT_SSTOPPED	0x00003000 /* State Stopped */
+#define  DMATX_STAT_SSUSP	0x00004000 /* State Suspend Pending */
+#define  DMATX_STAT_EMASK	0x000f0000 /* Error Mask */
+#define  DMATX_STAT_ENONE	0x00000000 /* Error None */
+#define  DMATX_STAT_EDPE	0x00010000 /* Error Desc. Protocol Error */
+#define  DMATX_STAT_EDFU	0x00020000 /* Error Data FIFO Underrun */
+#define  DMATX_STAT_EBEBR	0x00030000 /* Error Bus Error on Buffer Read */
+#define  DMATX_STAT_EBEDA	0x00040000 /* Error Bus Error on Desc. Access */
+#define  DMATX_STAT_FLUSHED	0x00100000 /* Flushed */
+#define B44_DMARX_CTRL	0x0210UL /* DMA RX Control */
+#define  DMARX_CTRL_ENABLE	0x00000001 /* Enable */
+#define  DMARX_CTRL_ROMASK	0x000000fe /* Receive Offset Mask */
+#define  DMARX_CTRL_ROSHIFT	1 	   /* Receive Offset Shift */
+#define B44_DMARX_ADDR	0x0214UL /* DMA RX Descriptor Ring Address */
+#define B44_DMARX_PTR	0x0218UL /* DMA RX Last Posted Descriptor */
+#define B44_DMARX_STAT	0x021CUL /* DMA RX Current Active Desc. + Status */
+#define  DMARX_STAT_CDMASK	0x00000fff /* Current Descriptor Mask */
+#define  DMARX_STAT_SMASK	0x0000f000 /* State Mask */
+#define  DMARX_STAT_SDISABLED	0x00000000 /* State Disabled */
+#define  DMARX_STAT_SACTIVE	0x00001000 /* State Active */
+#define  DMARX_STAT_SIDLE	0x00002000 /* State Idle Wait */
+#define  DMARX_STAT_SSTOPPED	0x00003000 /* State Stopped */
+#define  DMARX_STAT_EMASK	0x000f0000 /* Error Mask */
+#define  DMARX_STAT_ENONE	0x00000000 /* Error None */
+#define  DMARX_STAT_EDPE	0x00010000 /* Error Desc. Protocol Error */
+#define  DMARX_STAT_EDFO	0x00020000 /* Error Data FIFO Overflow */
+#define  DMARX_STAT_EBEBW	0x00030000 /* Error Bus Error on Buffer Write */
+#define  DMARX_STAT_EBEDA	0x00040000 /* Error Bus Error on Desc. Access */
+#define B44_DMAFIFO_AD	0x0220UL /* DMA FIFO Diag Address */
+#define  DMAFIFO_AD_OMASK	0x0000ffff /* Offset Mask */
+#define  DMAFIFO_AD_SMASK	0x000f0000 /* Select Mask */
+#define  DMAFIFO_AD_SXDD	0x00000000 /* Select Transmit DMA Data */
+#define  DMAFIFO_AD_SXDP	0x00010000 /* Select Transmit DMA Pointers */
+#define  DMAFIFO_AD_SRDD	0x00040000 /* Select Receive DMA Data */
+#define  DMAFIFO_AD_SRDP	0x00050000 /* Select Receive DMA Pointers */
+#define  DMAFIFO_AD_SXFD	0x00080000 /* Select Transmit FIFO Data */
+#define  DMAFIFO_AD_SXFP	0x00090000 /* Select Transmit FIFO Pointers */
+#define  DMAFIFO_AD_SRFD	0x000c0000 /* Select Receive FIFO Data */
+#define  DMAFIFO_AD_SRFP	0x000c0000 /* Select Receive FIFO Pointers */
+#define B44_DMAFIFO_LO	0x0224UL /* DMA FIFO Diag Low Data */
+#define B44_DMAFIFO_HI	0x0228UL /* DMA FIFO Diag High Data */
+#define B44_RXCONFIG	0x0400UL /* EMAC RX Config */
+#define  RXCONFIG_DBCAST	0x00000001 /* Disable Broadcast */
+#define  RXCONFIG_ALLMULTI	0x00000002 /* Accept All Multicast */
+#define  RXCONFIG_NORX_WHILE_TX	0x00000004 /* Receive Disable While Transmitting */
+#define  RXCONFIG_PROMISC	0x00000008 /* Promiscuous Enable */
+#define  RXCONFIG_LPBACK	0x00000010 /* Loopback Enable */
+#define  RXCONFIG_FLOW		0x00000020 /* Flow Control Enable */
+#define  RXCONFIG_FLOW_ACCEPT	0x00000040 /* Accept Unicast Flow Control Frame */
+#define  RXCONFIG_RFILT		0x00000080 /* Reject Filter */
+#define  RXCONFIG_CAM_ABSENT	0x00000100 /* CAM Absent */
+#define B44_RXMAXLEN	0x0404UL /* EMAC RX Max Packet Length */
+#define B44_TXMAXLEN	0x0408UL /* EMAC TX Max Packet Length */
+#define B44_MDIO_CTRL	0x0410UL /* EMAC MDIO Control */
+#define  MDIO_CTRL_MAXF_MASK	0x0000007f /* MDC Frequency */
+#define  MDIO_CTRL_PREAMBLE	0x00000080 /* MII Preamble Enable */
+#define B44_MDIO_DATA	0x0414UL /* EMAC MDIO Data */
+#define  MDIO_DATA_DATA		0x0000ffff /* R/W Data */
+#define  MDIO_DATA_TA_MASK	0x00030000 /* Turnaround Value */
+#define  MDIO_DATA_TA_SHIFT	16
+#define  MDIO_TA_VALID		2
+#define  MDIO_DATA_RA_MASK	0x007c0000 /* Register Address */
+#define  MDIO_DATA_RA_SHIFT	18
+#define  MDIO_DATA_PMD_MASK	0x0f800000 /* Physical Media Device */
+#define  MDIO_DATA_PMD_SHIFT	23
+#define  MDIO_DATA_OP_MASK	0x30000000 /* Opcode */
+#define  MDIO_DATA_OP_SHIFT	28
+#define  MDIO_OP_WRITE		1
+#define  MDIO_OP_READ		2
+#define  MDIO_DATA_SB_MASK	0xc0000000 /* Start Bits */
+#define  MDIO_DATA_SB_SHIFT	30
+#define  MDIO_DATA_SB_START	0x40000000 /* Start Of Frame */
+#define B44_EMAC_IMASK	0x0418UL /* EMAC Interrupt Mask */
+#define B44_EMAC_ISTAT	0x041CUL /* EMAC Interrupt Status */
+#define  EMAC_INT_MII		0x00000001 /* MII MDIO Interrupt */
+#define  EMAC_INT_MIB		0x00000002 /* MIB Interrupt */
+#define  EMAC_INT_FLOW		0x00000003 /* Flow Control Interrupt */
+#define B44_CAM_DATA_LO	0x0420UL /* EMAC CAM Data Low */
+#define B44_CAM_DATA_HI	0x0424UL /* EMAC CAM Data High */
+#define  CAM_DATA_HI_VALID	0x00010000 /* Valid Bit */
+#define B44_CAM_CTRL	0x0428UL /* EMAC CAM Control */
+#define  CAM_CTRL_ENABLE	0x00000001 /* CAM Enable */
+#define  CAM_CTRL_MSEL		0x00000002 /* Mask Select */
+#define  CAM_CTRL_READ		0x00000004 /* Read */
+#define  CAM_CTRL_WRITE		0x00000008 /* Write */
+#define  CAM_CTRL_INDEX_MASK	0x003f0000 /* Index Mask */
+#define  CAM_CTRL_INDEX_SHIFT	16
+#define  CAM_CTRL_BUSY		0x80000000 /* CAM Busy */
+#define B44_ENET_CTRL	0x042CUL /* EMAC ENET Control */
+#define  ENET_CTRL_ENABLE	0x00000001 /* EMAC Enable */
+#define  ENET_CTRL_DISABLE	0x00000002 /* EMAC Disable */
+#define  ENET_CTRL_SRST		0x00000004 /* EMAC Soft Reset */
+#define  ENET_CTRL_EPSEL	0x00000008 /* External PHY Select */
+#define B44_TX_CTRL	0x0430UL /* EMAC TX Control */
+#define  TX_CTRL_DUPLEX		0x00000001 /* Full Duplex */
+#define  TX_CTRL_FMODE		0x00000002 /* Flow Mode */
+#define  TX_CTRL_SBENAB		0x00000004 /* Single Backoff Enable */
+#define  TX_CTRL_SMALL_SLOT	0x00000008 /* Small Slottime */
+#define B44_TX_WMARK	0x0434UL /* EMAC TX Watermark */
+#define B44_MIB_CTRL	0x0438UL /* EMAC MIB Control */
+#define  MIB_CTRL_CLR_ON_READ	0x00000001 /* Autoclear on Read */
+#define B44_TX_GOOD_O	0x0500UL /* MIB TX Good Octets */
+#define B44_TX_GOOD_P	0x0504UL /* MIB TX Good Packets */
+#define B44_TX_O	0x0508UL /* MIB TX Octets */
+#define B44_TX_P	0x050CUL /* MIB TX Packets */
+#define B44_TX_BCAST	0x0510UL /* MIB TX Broadcast Packets */
+#define B44_TX_MCAST	0x0514UL /* MIB TX Multicast Packets */
+#define B44_TX_64	0x0518UL /* MIB TX <= 64 byte Packets */
+#define B44_TX_65_127	0x051CUL /* MIB TX 65 to 127 byte Packets */
+#define B44_TX_128_255	0x0520UL /* MIB TX 128 to 255 byte Packets */
+#define B44_TX_256_511	0x0524UL /* MIB TX 256 to 511 byte Packets */
+#define B44_TX_512_1023	0x0528UL /* MIB TX 512 to 1023 byte Packets */
+#define B44_TX_1024_MAX	0x052CUL /* MIB TX 1024 to max byte Packets */
+#define B44_TX_JABBER	0x0530UL /* MIB TX Jabber Packets */
+#define B44_TX_OSIZE	0x0534UL /* MIB TX Oversize Packets */
+#define B44_TX_FRAG	0x0538UL /* MIB TX Fragment Packets */
+#define B44_TX_URUNS	0x053CUL /* MIB TX Underruns */
+#define B44_TX_TCOLS	0x0540UL /* MIB TX Total Collisions */
+#define B44_TX_SCOLS	0x0544UL /* MIB TX Single Collisions */
+#define B44_TX_MCOLS	0x0548UL /* MIB TX Multiple Collisions */
+#define B44_TX_ECOLS	0x054CUL /* MIB TX Excessive Collisions */
+#define B44_TX_LCOLS	0x0550UL /* MIB TX Late Collisions */
+#define B44_TX_DEFERED	0x0554UL /* MIB TX Deferred Packets */
+#define B44_TX_CLOST	0x0558UL /* MIB TX Carrier Lost */
+#define B44_TX_PAUSE	0x055CUL /* MIB TX Pause Packets */
+#define B44_RX_GOOD_O	0x0580UL /* MIB RX Good Octets */
+#define B44_RX_GOOD_P	0x0584UL /* MIB RX Good Packets */
+#define B44_RX_O	0x0588UL /* MIB RX Octets */
+#define B44_RX_P	0x058CUL /* MIB RX Packets */
+#define B44_RX_BCAST	0x0590UL /* MIB RX Broadcast Packets */
+#define B44_RX_MCAST	0x0594UL /* MIB RX Multicast Packets */
+#define B44_RX_64	0x0598UL /* MIB RX <= 64 byte Packets */
+#define B44_RX_65_127	0x059CUL /* MIB RX 65 to 127 byte Packets */
+#define B44_RX_128_255	0x05A0UL /* MIB RX 128 to 255 byte Packets */
+#define B44_RX_256_511	0x05A4UL /* MIB RX 256 to 511 byte Packets */
+#define B44_RX_512_1023	0x05A8UL /* MIB RX 512 to 1023 byte Packets */
+#define B44_RX_1024_MAX	0x05ACUL /* MIB RX 1024 to max byte Packets */
+#define B44_RX_JABBER	0x05B0UL /* MIB RX Jabber Packets */
+#define B44_RX_OSIZE	0x05B4UL /* MIB RX Oversize Packets */
+#define B44_RX_FRAG	0x05B8UL /* MIB RX Fragment Packets */
+#define B44_RX_MISS	0x05BCUL /* MIB RX Missed Packets */
+#define B44_RX_CRCA	0x05C0UL /* MIB RX CRC Align Errors */
+#define B44_RX_USIZE	0x05C4UL /* MIB RX Undersize Packets */
+#define B44_RX_CRC	0x05C8UL /* MIB RX CRC Errors */
+#define B44_RX_ALIGN	0x05CCUL /* MIB RX Align Errors */
+#define B44_RX_SYM	0x05D0UL /* MIB RX Symbol Errors */
+#define B44_RX_PAUSE	0x05D4UL /* MIB RX Pause Packets */
+#define B44_RX_NPAUSE	0x05D8UL /* MIB RX Non-Pause Packets */
+
+/* 4400 PHY registers */
+#define B44_MII_AUXCTRL		24	/* Auxiliary Control */
+#define  MII_AUXCTRL_DUPLEX	0x0001  /* Full Duplex */
+#define  MII_AUXCTRL_SPEED	0x0002  /* 1=100Mbps, 0=10Mbps */
+#define  MII_AUXCTRL_FORCED	0x0004	/* Forced 10/100 */
+#define B44_MII_ALEDCTRL	26	/* Activity LED */
+#define  MII_ALEDCTRL_ALLMSK	0x7fff
+#define B44_MII_TLEDCTRL	27	/* Traffic Meter LED */
+#define  MII_TLEDCTRL_ENABLE	0x0040
+
+struct dma_desc {
+	__le32	ctrl;
+	__le32	addr;
+};
+
+/* The DMA engine has only 12 bits for descriptor offsets, so the
+ * descriptor table must be aligned to this boundary.
+ */
+#define DMA_TABLE_BYTES		4096
+
+#define DESC_CTRL_LEN	0x00001fff
+#define DESC_CTRL_CMASK	0x0ff00000 /* Core specific bits */
+#define DESC_CTRL_EOT	0x10000000 /* End of Table */
+#define DESC_CTRL_IOC	0x20000000 /* Interrupt On Completion */
+#define DESC_CTRL_EOF	0x40000000 /* End of Frame */
+#define DESC_CTRL_SOF	0x80000000 /* Start of Frame */
+
+#define RX_COPY_THRESHOLD  	256
+
+struct rx_header {
+	__le16	len;
+	__le16	flags;
+	__le16	pad[12];
+};
+#define RX_HEADER_LEN	28
+
+#define RX_FLAG_OFIFO	0x00000001 /* FIFO Overflow */
+#define RX_FLAG_CRCERR	0x00000002 /* CRC Error */
+#define RX_FLAG_SERR	0x00000004 /* Receive Symbol Error */
+#define RX_FLAG_ODD	0x00000008 /* Frame has odd number of nibbles */
+#define RX_FLAG_LARGE	0x00000010 /* Frame is > RX MAX Length */
+#define RX_FLAG_MCAST	0x00000020 /* Dest is Multicast Address */
+#define RX_FLAG_BCAST	0x00000040 /* Dest is Broadcast Address */
+#define RX_FLAG_MISS	0x00000080 /* Received due to promisc mode */
+#define RX_FLAG_LAST	0x00000800 /* Last buffer in frame */
+#define RX_FLAG_ERRORS	(RX_FLAG_ODD | RX_FLAG_SERR | RX_FLAG_CRCERR | RX_FLAG_OFIFO)
+
+struct ring_info {
+	struct sk_buff		*skb;
+	dma_addr_t	mapping;
+};
+
+#define B44_MCAST_TABLE_SIZE	32
+#define B44_PHY_ADDR_NO_PHY	30
+#define B44_MDC_RATIO		5000000
+
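+/* X-macro list of MIB counters; expanded below to declare the fields
+ * of struct b44_hw_stats, and to build the matching ethtool string
+ * table (b44_gstrings) in b44.c.
+ */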
+#define	B44_STAT_REG_DECLARE		\
+	_B44(tx_good_octets)		\
+	_B44(tx_good_pkts)		\
+	_B44(tx_octets)			\
+	_B44(tx_pkts)			\
+	_B44(tx_broadcast_pkts)		\
+	_B44(tx_multicast_pkts)		\
+	_B44(tx_len_64)			\
+	_B44(tx_len_65_to_127)		\
+	_B44(tx_len_128_to_255)		\
+	_B44(tx_len_256_to_511)		\
+	_B44(tx_len_512_to_1023)	\
+	_B44(tx_len_1024_to_max)	\
+	_B44(tx_jabber_pkts)		\
+	_B44(tx_oversize_pkts)		\
+	_B44(tx_fragment_pkts)		\
+	_B44(tx_underruns)		\
+	_B44(tx_total_cols)		\
+	_B44(tx_single_cols)		\
+	_B44(tx_multiple_cols)		\
+	_B44(tx_excessive_cols)		\
+	_B44(tx_late_cols)		\
+	_B44(tx_defered)		\
+	_B44(tx_carrier_lost)		\
+	_B44(tx_pause_pkts)		\
+	_B44(rx_good_octets)		\
+	_B44(rx_good_pkts)		\
+	_B44(rx_octets)			\
+	_B44(rx_pkts)			\
+	_B44(rx_broadcast_pkts)		\
+	_B44(rx_multicast_pkts)		\
+	_B44(rx_len_64)			\
+	_B44(rx_len_65_to_127)		\
+	_B44(rx_len_128_to_255)		\
+	_B44(rx_len_256_to_511)		\
+	_B44(rx_len_512_to_1023)	\
+	_B44(rx_len_1024_to_max)	\
+	_B44(rx_jabber_pkts)		\
+	_B44(rx_oversize_pkts)		\
+	_B44(rx_fragment_pkts)		\
+	_B44(rx_missed_pkts)		\
+	_B44(rx_crc_align_errs)		\
+	_B44(rx_undersize)		\
+	_B44(rx_crc_errs)		\
+	_B44(rx_align_errs)		\
+	_B44(rx_symbol_errs)		\
+	_B44(rx_pause_pkts)		\
+	_B44(rx_nonpause_pkts)
+
+/* SW copy of device statistics, kept up to date by a periodic timer
+ * which probes HW values. Check b44_stats_update if you change
+ * the layout.
+ */
+struct b44_hw_stats {
+#define _B44(x)	u32 x;
+B44_STAT_REG_DECLARE
+#undef _B44
+};
+
+struct ssb_device;
+
+struct b44 {
+	spinlock_t		lock;
+
+	u32			imask, istat;
+
+	struct dma_desc		*rx_ring, *tx_ring;
+
+	u32			tx_prod, tx_cons;
+	u32			rx_prod, rx_cons;
+
+	struct ring_info	*rx_buffers;
+	struct ring_info	*tx_buffers;
+
+	struct napi_struct	napi;
+
+	u32			dma_offset;
+	u32			flags;
+#define B44_FLAG_B0_ANDLATER	0x00000001
+#define B44_FLAG_BUGGY_TXPTR	0x00000002
+#define B44_FLAG_REORDER_BUG	0x00000004
+#define B44_FLAG_PAUSE_AUTO	0x00008000
+#define B44_FLAG_FULL_DUPLEX	0x00010000
+#define B44_FLAG_100_BASE_T	0x00020000
+#define B44_FLAG_TX_PAUSE	0x00040000
+#define B44_FLAG_RX_PAUSE	0x00080000
+#define B44_FLAG_FORCE_LINK	0x00100000
+#define B44_FLAG_ADV_10HALF	0x01000000
+#define B44_FLAG_ADV_10FULL	0x02000000
+#define B44_FLAG_ADV_100HALF	0x04000000
+#define B44_FLAG_ADV_100FULL	0x08000000
+#define B44_FLAG_INTERNAL_PHY	0x10000000
+#define B44_FLAG_RX_RING_HACK	0x20000000
+#define B44_FLAG_TX_RING_HACK	0x40000000
+#define B44_FLAG_WOL_ENABLE	0x80000000
+
+	u32			msg_enable;
+
+	struct timer_list	timer;
+
+	struct b44_hw_stats	hw_stats;
+
+	struct ssb_device	*sdev;
+	struct net_device	*dev;
+
+	dma_addr_t		rx_ring_dma, tx_ring_dma;
+
+	u32			rx_pending;
+	u32			tx_pending;
+	u8			phy_addr;
+	u8			force_copybreak;
+	struct mii_if_info	mii_if;
+};
+
+#endif /* _B44_H */
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
new file mode 100644
index 0000000..1d9b985
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -0,0 +1,1966 @@
+/*
+ * Driver for BCM963xx builtin Ethernet mac
+ *
+ * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
+
+#include <bcm63xx_dev_enet.h>
+#include "bcm63xx_enet.h"
+
+static char bcm_enet_driver_name[] = "bcm63xx_enet";
+static char bcm_enet_driver_version[] = "1.0";
+
+static int copybreak __read_mostly = 128;
+module_param(copybreak, int, 0);
+MODULE_PARM_DESC(copybreak, "Receive copy threshold");
+
+/* io memory shared between all devices */
+static void __iomem *bcm_enet_shared_base;
+
+/*
+ * io helpers to access mac registers
+ */
+static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(priv->base + off);
+}
+
+static inline void enet_writel(struct bcm_enet_priv *priv,
+			       u32 val, u32 off)
+{
+	bcm_writel(val, priv->base + off);
+}
+
+/*
+ * io helpers to access shared registers
+ */
+static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off)
+{
+	return bcm_readl(bcm_enet_shared_base + off);
+}
+
+static inline void enet_dma_writel(struct bcm_enet_priv *priv,
+				       u32 val, u32 off)
+{
+	bcm_writel(val, bcm_enet_shared_base + off);
+}
+
+/*
+ * write given data into mii register and wait for transfer to end
+ * with timeout (average measured transfer time is 25us)
+ */
+static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
+{
+	int limit;
+
+	/* make sure mii interrupt status is cleared */
+	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);
+
+	enet_writel(priv, data, ENET_MIIDATA_REG);
+	wmb();
+
+	/* busy wait on mii interrupt bit, with timeout */
+	limit = 1000;
+	do {
+		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
+			break;
+		udelay(1);
+	} while (limit-- > 0);
+
+	return (limit < 0) ? 1 : 0;
+}
+
+/*
+ * MII internal read callback
+ */
+static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
+			      int regnum)
+{
+	u32 tmp, val;
+
+	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
+	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+	tmp |= ENET_MIIDATA_OP_READ_MASK;
+
+	if (do_mdio_op(priv, tmp))
+		return -1;
+
+	val = enet_readl(priv, ENET_MIIDATA_REG);
+	val &= 0xffff;
+	return val;
+}
+
+/*
+ * MII internal write callback
+ */
+static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
+			       int regnum, u16 value)
+{
+	u32 tmp;
+
+	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
+	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
+	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
+	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
+	tmp |= ENET_MIIDATA_OP_WRITE_MASK;
+
+	(void)do_mdio_op(priv, tmp);
+	return 0;
+}
+
+/*
+ * MII read callback from phylib
+ */
+static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id,
+				     int regnum)
+{
+	return bcm_enet_mdio_read(bus->priv, mii_id, regnum);
+}
+
+/*
+ * MII write callback from phylib
+ */
+static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id,
+				      int regnum, u16 value)
+{
+	return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value);
+}
+
+/*
+ * MII read callback from mii core
+ */
+static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id,
+				  int regnum)
+{
+	return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum);
+}
+
+/*
+ * MII write callback from mii core
+ */
+static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id,
+				    int regnum, int value)
+{
+	bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value);
+}
+
+/*
+ * refill rx queue
+ */
+static int bcm_enet_refill_rx(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	while (priv->rx_desc_count < priv->rx_ring_size) {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+		dma_addr_t p;
+		int desc_idx;
+		u32 len_stat;
+
+		desc_idx = priv->rx_dirty_desc;
+		desc = &priv->rx_desc_cpu[desc_idx];
+
+		if (!priv->rx_skb[desc_idx]) {
+			skb = netdev_alloc_skb(dev, priv->rx_skb_size);
+			if (!skb)
+				break;
+			priv->rx_skb[desc_idx] = skb;
+
+			p = dma_map_single(&priv->pdev->dev, skb->data,
+					   priv->rx_skb_size,
+					   DMA_FROM_DEVICE);
+			desc->address = p;
+		}
+
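+		/* hand the buffer back to hardware: set its length,
+		 * mark the ring wrap on the last descriptor and give
+		 * ownership to the dma engine */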
+		len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT;
+		len_stat |= DMADESC_OWNER_MASK;
+		if (priv->rx_dirty_desc == priv->rx_ring_size - 1) {
+			len_stat |= DMADESC_WRAP_MASK;
+			priv->rx_dirty_desc = 0;
+		} else {
+			priv->rx_dirty_desc++;
+		}
+		wmb();
+		desc->len_stat = len_stat;
+
+		priv->rx_desc_count++;
+
+		/* tell dma engine we allocated one buffer */
+		enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan));
+	}
+
+	/* If rx ring is still empty, set a timer to try allocating
+	 * again at a later time. */
+	if (priv->rx_desc_count == 0 && netif_running(dev)) {
+		dev_warn(&priv->pdev->dev, "unable to refill rx ring\n");
+		priv->rx_timeout.expires = jiffies + HZ;
+		add_timer(&priv->rx_timeout);
+	}
+
+	return 0;
+}
+
+/*
+ * timer callback to retry rx queue refill after an allocation failure
+ */
+static void bcm_enet_refill_rx_timer(unsigned long data)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+
+	dev = (struct net_device *)data;
+	priv = netdev_priv(dev);
+
+	spin_lock(&priv->rx_lock);
+	bcm_enet_refill_rx(dev);
+	spin_unlock(&priv->rx_lock);
+}
+
+/*
+ * extract packet from rx queue
+ */
+static int bcm_enet_receive_queue(struct net_device *dev, int budget)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int processed;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+	processed = 0;
+
+	/* don't scan the ring further than the number of refilled
+	 * descriptors */
+	if (budget > priv->rx_desc_count)
+		budget = priv->rx_desc_count;
+
+	do {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+		int desc_idx;
+		u32 len_stat;
+		unsigned int len;
+
+		desc_idx = priv->rx_curr_desc;
+		desc = &priv->rx_desc_cpu[desc_idx];
+
+		/* make sure we actually read the descriptor status at
+		 * each loop */
+		rmb();
+
+		len_stat = desc->len_stat;
+
+		/* break if dma ownership belongs to hw */
+		if (len_stat & DMADESC_OWNER_MASK)
+			break;
+
+		processed++;
+		priv->rx_curr_desc++;
+		if (priv->rx_curr_desc == priv->rx_ring_size)
+			priv->rx_curr_desc = 0;
+		priv->rx_desc_count--;
+
+		/* if the packet does not have start of packet _and_
+		 * end of packet flag set, then just recycle it */
+		if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) {
+			dev->stats.rx_dropped++;
+			continue;
+		}
+
+		/* recycle packet if it's marked as bad */
+		if (unlikely(len_stat & DMADESC_ERR_MASK)) {
+			dev->stats.rx_errors++;
+
+			if (len_stat & DMADESC_OVSIZE_MASK)
+				dev->stats.rx_length_errors++;
+			if (len_stat & DMADESC_CRC_MASK)
+				dev->stats.rx_crc_errors++;
+			if (len_stat & DMADESC_UNDER_MASK)
+				dev->stats.rx_frame_errors++;
+			if (len_stat & DMADESC_OV_MASK)
+				dev->stats.rx_fifo_errors++;
+			continue;
+		}
+
+		/* valid packet */
+		skb = priv->rx_skb[desc_idx];
+		len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT;
+		/* don't include FCS */
+		len -= 4;
+
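+		/* small packets are copied into a fresh skb so the
+		 * original rx buffer can be recycled without being
+		 * remapped */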
+		if (len < copybreak) {
+			struct sk_buff *nskb;
+
+			nskb = netdev_alloc_skb_ip_align(dev, len);
+			if (!nskb) {
+				/* forget packet, just rearm desc */
+				dev->stats.rx_dropped++;
+				continue;
+			}
+
+			dma_sync_single_for_cpu(kdev, desc->address,
+						len, DMA_FROM_DEVICE);
+			memcpy(nskb->data, skb->data, len);
+			dma_sync_single_for_device(kdev, desc->address,
+						   len, DMA_FROM_DEVICE);
+			skb = nskb;
+		} else {
+			dma_unmap_single(&priv->pdev->dev, desc->address,
+					 priv->rx_skb_size, DMA_FROM_DEVICE);
+			priv->rx_skb[desc_idx] = NULL;
+		}
+
+		skb_put(skb, len);
+		skb->protocol = eth_type_trans(skb, dev);
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += len;
+		netif_receive_skb(skb);
+
+	} while (--budget > 0);
+
+	if (processed || !priv->rx_desc_count) {
+		bcm_enet_refill_rx(dev);
+
+		/* kick rx dma */
+		enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+				ENETDMA_CHANCFG_REG(priv->rx_chan));
+	}
+
+	return processed;
+}
+
+
+/*
+ * try to or force reclaim of transmitted buffers
+ */
+static int bcm_enet_tx_reclaim(struct net_device *dev, int force)
+{
+	struct bcm_enet_priv *priv;
+	int released;
+
+	priv = netdev_priv(dev);
+	released = 0;
+
+	while (priv->tx_desc_count < priv->tx_ring_size) {
+		struct bcm_enet_desc *desc;
+		struct sk_buff *skb;
+
+		/* We run in a bh and race against start_xmit, which
+		 * is called with bh disabled */
+		spin_lock(&priv->tx_lock);
+
+		desc = &priv->tx_desc_cpu[priv->tx_dirty_desc];
+
+		if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) {
+			spin_unlock(&priv->tx_lock);
+			break;
+		}
+
+		/* ensure other fields of the descriptor were not read
+		 * before we checked ownership */
+		rmb();
+
+		skb = priv->tx_skb[priv->tx_dirty_desc];
+		priv->tx_skb[priv->tx_dirty_desc] = NULL;
+		dma_unmap_single(&priv->pdev->dev, desc->address, skb->len,
+				 DMA_TO_DEVICE);
+
+		priv->tx_dirty_desc++;
+		if (priv->tx_dirty_desc == priv->tx_ring_size)
+			priv->tx_dirty_desc = 0;
+		priv->tx_desc_count++;
+
+		spin_unlock(&priv->tx_lock);
+
+		if (desc->len_stat & DMADESC_UNDER_MASK)
+			dev->stats.tx_errors++;
+
+		dev_kfree_skb(skb);
+		released++;
+	}
+
+	if (netif_queue_stopped(dev) && released)
+		netif_wake_queue(dev);
+
+	return released;
+}
+
+/*
+ * poll func, called by network core
+ */
+static int bcm_enet_poll(struct napi_struct *napi, int budget)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	int tx_work_done, rx_work_done;
+
+	priv = container_of(napi, struct bcm_enet_priv, napi);
+	dev = priv->net_dev;
+
+	/* ack interrupts */
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IR_REG(priv->rx_chan));
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IR_REG(priv->tx_chan));
+
+	/* reclaim sent skb */
+	tx_work_done = bcm_enet_tx_reclaim(dev, 0);
+
+	spin_lock(&priv->rx_lock);
+	rx_work_done = bcm_enet_receive_queue(dev, budget);
+	spin_unlock(&priv->rx_lock);
+
+	if (rx_work_done >= budget || tx_work_done > 0) {
+		/* rx/tx queue is not yet empty/clean */
+		return rx_work_done;
+	}
+
+	/* no more packets in rx/tx queue, remove device from poll
+	 * queue */
+	napi_complete(napi);
+
+	/* restore rx/tx interrupt */
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IRMASK_REG(priv->rx_chan));
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IRMASK_REG(priv->tx_chan));
+
+	return rx_work_done;
+}
+
+/*
+ * mac interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+	u32 stat;
+
+	dev = dev_id;
+	priv = netdev_priv(dev);
+
+	stat = enet_readl(priv, ENET_IR_REG);
+	if (!(stat & ENET_IR_MIB))
+		return IRQ_NONE;
+
+	/* clear & mask interrupt */
+	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
+	enet_writel(priv, 0, ENET_IRMASK_REG);
+
+	/* read mib registers in workqueue */
+	schedule_work(&priv->mib_update_task);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * rx/tx dma interrupt handler
+ */
+static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id)
+{
+	struct net_device *dev;
+	struct bcm_enet_priv *priv;
+
+	dev = dev_id;
+	priv = netdev_priv(dev);
+
+	/* mask rx/tx interrupts */
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+
+	napi_schedule(&priv->napi);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * tx request callback
+ */
+static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct bcm_enet_desc *desc;
+	u32 len_stat;
+	int ret;
+
+	priv = netdev_priv(dev);
+
+	/* lock against tx reclaim */
+	spin_lock(&priv->tx_lock);
+
+	/* make sure the tx hw queue is not full; this should not
+	 * happen since we stop the queue before that is the case */
+	if (unlikely(!priv->tx_desc_count)) {
+		netif_stop_queue(dev);
+		dev_err(&priv->pdev->dev, "xmit called with no tx desc "
+			"available?\n");
+		ret = NETDEV_TX_BUSY;
+		goto out_unlock;
+	}
+
+	/* point to the next available desc */
+	desc = &priv->tx_desc_cpu[priv->tx_curr_desc];
+	priv->tx_skb[priv->tx_curr_desc] = skb;
+
+	/* fill descriptor */
+	desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
+
+	len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK;
+	len_stat |= DMADESC_ESOP_MASK |
+		DMADESC_APPEND_CRC |
+		DMADESC_OWNER_MASK;
+
+	priv->tx_curr_desc++;
+	if (priv->tx_curr_desc == priv->tx_ring_size) {
+		priv->tx_curr_desc = 0;
+		len_stat |= DMADESC_WRAP_MASK;
+	}
+	priv->tx_desc_count--;
+
+	/* dma might be already polling, make sure we update desc
+	 * fields in correct order */
+	wmb();
+	desc->len_stat = len_stat;
+	wmb();
+
+	/* kick tx dma */
+	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+			ENETDMA_CHANCFG_REG(priv->tx_chan));
+
+	/* stop queue if no more desc available */
+	if (!priv->tx_desc_count)
+		netif_stop_queue(dev);
+
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	ret = NETDEV_TX_OK;
+
+out_unlock:
+	spin_unlock(&priv->tx_lock);
+	return ret;
+}
+
+/*
+ * Change the interface's mac address.
+ */
+static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
+{
+	struct bcm_enet_priv *priv;
+	struct sockaddr *addr = p;
+	u32 val;
+
+	priv = netdev_priv(dev);
+	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
+	/* use perfect match register 0 to store my mac address */
+	val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
+		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
+	enet_writel(priv, val, ENET_PML_REG(0));
+
+	val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+	val |= ENET_PMH_DATAVALID_MASK;
+	enet_writel(priv, val, ENET_PMH_REG(0));
+
+	return 0;
+}
+
+/*
+ * Change rx mode (promiscuous/allmulti) and update multicast list
+ */
+static void bcm_enet_set_multicast_list(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct netdev_hw_addr *ha;
+	u32 val;
+	int i;
+
+	priv = netdev_priv(dev);
+
+	val = enet_readl(priv, ENET_RXCFG_REG);
+
+	if (dev->flags & IFF_PROMISC)
+		val |= ENET_RXCFG_PROMISC_MASK;
+	else
+		val &= ~ENET_RXCFG_PROMISC_MASK;
+
+	/* only 3 perfect match registers are left; the first one is
+	 * used for our own mac address */
+	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
+		val |= ENET_RXCFG_ALLMCAST_MASK;
+	else
+		val &= ~ENET_RXCFG_ALLMCAST_MASK;
+
+	/* no need to set perfect match registers if we catch all
+	 * multicast */
+	if (val & ENET_RXCFG_ALLMCAST_MASK) {
+		enet_writel(priv, val, ENET_RXCFG_REG);
+		return;
+	}
+
+	i = 0;
+	netdev_for_each_mc_addr(ha, dev) {
+		u8 *dmi_addr;
+		u32 tmp;
+
+		if (i == 3)
+			break;
+		/* update perfect match registers */
+		dmi_addr = ha->addr;
+		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
+			(dmi_addr[4] << 8) | dmi_addr[5];
+		enet_writel(priv, tmp, ENET_PML_REG(i + 1));
+
+		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
+		tmp |= ENET_PMH_DATAVALID_MASK;
+		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
+	}
+
+	for (; i < 3; i++) {
+		enet_writel(priv, 0, ENET_PML_REG(i + 1));
+		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
+	}
+
+	enet_writel(priv, val, ENET_RXCFG_REG);
+}
+
+/*
+ * set mac duplex parameters
+ */
+static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
+{
+	u32 val;
+
+	val = enet_readl(priv, ENET_TXCTL_REG);
+	if (fullduplex)
+		val |= ENET_TXCTL_FD_MASK;
+	else
+		val &= ~ENET_TXCTL_FD_MASK;
+	enet_writel(priv, val, ENET_TXCTL_REG);
+}
+
+/*
+ * set mac flow control parameters
+ */
+static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
+{
+	u32 val;
+
+	/* rx flow control (pause frame handling) */
+	val = enet_readl(priv, ENET_RXCFG_REG);
+	if (rx_en)
+		val |= ENET_RXCFG_ENFLOW_MASK;
+	else
+		val &= ~ENET_RXCFG_ENFLOW_MASK;
+	enet_writel(priv, val, ENET_RXCFG_REG);
+
+	/* tx flow control (pause frame generation) */
+	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
+	if (tx_en)
+		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+	else
+		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
+	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
+}
+
+/*
+ * link changed callback (from phylib)
+ */
+static void bcm_enet_adjust_phy_link(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct phy_device *phydev;
+	int status_changed;
+
+	priv = netdev_priv(dev);
+	phydev = priv->phydev;
+	status_changed = 0;
+
+	if (priv->old_link != phydev->link) {
+		status_changed = 1;
+		priv->old_link = phydev->link;
+	}
+
+	/* reflect duplex change in mac configuration */
+	if (phydev->link && phydev->duplex != priv->old_duplex) {
+		bcm_enet_set_duplex(priv,
+				    (phydev->duplex == DUPLEX_FULL) ? 1 : 0);
+		status_changed = 1;
+		priv->old_duplex = phydev->duplex;
+	}
+
+	/* enable flow control if the remote advertises it (trust
+	 * phylib to check that duplex is full) */
+	if (phydev->link && phydev->pause != priv->old_pause) {
+		int rx_pause_en, tx_pause_en;
+
+		if (phydev->pause) {
+			/* pause was advertised by lpa and us */
+			rx_pause_en = 1;
+			tx_pause_en = 1;
+		} else if (!priv->pause_auto) {
+			/* pause settings overridden by user */
+			rx_pause_en = priv->pause_rx;
+			tx_pause_en = priv->pause_tx;
+		} else {
+			rx_pause_en = 0;
+			tx_pause_en = 0;
+		}
+
+		bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en);
+		status_changed = 1;
+		priv->old_pause = phydev->pause;
+	}
+
+	if (status_changed) {
+		pr_info("%s: link %s", dev->name, phydev->link ?
+			"UP" : "DOWN");
+		if (phydev->link)
+			pr_cont(" - %d/%s - flow control %s", phydev->speed,
+			       DUPLEX_FULL == phydev->duplex ? "full" : "half",
+			       phydev->pause == 1 ? "rx&tx" : "off");
+
+		pr_cont("\n");
+	}
+}
+
+/*
+ * link changed callback (if phylib is not used)
+ */
+static void bcm_enet_adjust_link(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	bcm_enet_set_duplex(priv, priv->force_duplex_full);
+	bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx);
+	netif_carrier_on(dev);
+
+	pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n",
+		dev->name,
+		priv->force_speed_100 ? 100 : 10,
+		priv->force_duplex_full ? "full" : "half",
+		priv->pause_rx ? "rx" : "off",
+		priv->pause_tx ? "tx" : "off");
+}
+
+/*
+ * open callback, allocate dma rings & buffers and start rx operation
+ */
+static int bcm_enet_open(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct sockaddr addr;
+	struct device *kdev;
+	struct phy_device *phydev;
+	int i, ret;
+	unsigned int size;
+	char phy_id[MII_BUS_ID_SIZE + 3];
+	void *p;
+	u32 val;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	if (priv->has_phy) {
+		/* connect to PHY */
+		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
+			 priv->mac_id ? "1" : "0", priv->phy_id);
+
+		phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
+				     PHY_INTERFACE_MODE_MII);
+
+		if (IS_ERR(phydev)) {
+			dev_err(kdev, "could not attach to PHY\n");
+			return PTR_ERR(phydev);
+		}
+
+		/* mask with MAC supported features */
+		phydev->supported &= (SUPPORTED_10baseT_Half |
+				      SUPPORTED_10baseT_Full |
+				      SUPPORTED_100baseT_Half |
+				      SUPPORTED_100baseT_Full |
+				      SUPPORTED_Autoneg |
+				      SUPPORTED_Pause |
+				      SUPPORTED_MII);
+		phydev->advertising = phydev->supported;
+
+		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
+			phydev->advertising |= SUPPORTED_Pause;
+		else
+			phydev->advertising &= ~SUPPORTED_Pause;
+
+		dev_info(kdev, "attached PHY at address %d [%s]\n",
+			 phydev->addr, phydev->drv->name);
+
+		priv->old_link = 0;
+		priv->old_duplex = -1;
+		priv->old_pause = -1;
+		priv->phydev = phydev;
+	}
+
+	/* mask all interrupts and request them */
+	enet_writel(priv, 0, ENET_IRMASK_REG);
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+
+	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
+	if (ret)
+		goto out_phy_disconnect;
+
+	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, IRQF_DISABLED,
+			  dev->name, dev);
+	if (ret)
+		goto out_freeirq;
+
+	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
+			  IRQF_DISABLED, dev->name, dev);
+	if (ret)
+		goto out_freeirq_rx;
+
+	/* initialize perfect match registers */
+	for (i = 0; i < 4; i++) {
+		enet_writel(priv, 0, ENET_PML_REG(i));
+		enet_writel(priv, 0, ENET_PMH_REG(i));
+	}
+
+	/* write device mac address */
+	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
+	bcm_enet_set_mac_address(dev, &addr);
+
+	/* allocate rx dma ring */
+	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		dev_err(kdev, "cannot allocate rx ring %u\n", size);
+		ret = -ENOMEM;
+		goto out_freeirq_tx;
+	}
+
+	memset(p, 0, size);
+	priv->rx_desc_alloc_size = size;
+	priv->rx_desc_cpu = p;
+
+	/* allocate tx dma ring */
+	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
+	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
+	if (!p) {
+		dev_err(kdev, "cannot allocate tx ring\n");
+		ret = -ENOMEM;
+		goto out_free_rx_ring;
+	}
+
+	memset(p, 0, size);
+	priv->tx_desc_alloc_size = size;
+	priv->tx_desc_cpu = p;
+
+	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->tx_skb) {
+		dev_err(kdev, "cannot allocate tx skb queue\n");
+		ret = -ENOMEM;
+		goto out_free_tx_ring;
+	}
+
+	priv->tx_desc_count = priv->tx_ring_size;
+	priv->tx_dirty_desc = 0;
+	priv->tx_curr_desc = 0;
+	spin_lock_init(&priv->tx_lock);
+
+	/* init & fill rx ring with skbs */
+	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
+			       GFP_KERNEL);
+	if (!priv->rx_skb) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out_free_tx_skb;
+	}
+
+	priv->rx_desc_count = 0;
+	priv->rx_dirty_desc = 0;
+	priv->rx_curr_desc = 0;
+
+	/* initialize flow control buffer allocation */
+	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
+			ENETDMA_BUFALLOC_REG(priv->rx_chan));
+
+	if (bcm_enet_refill_rx(dev)) {
+		dev_err(kdev, "cannot allocate rx skb queue\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* write rx & tx ring addresses */
+	enet_dma_writel(priv, priv->rx_desc_dma,
+			ENETDMA_RSTART_REG(priv->rx_chan));
+	enet_dma_writel(priv, priv->tx_desc_dma,
+			ENETDMA_RSTART_REG(priv->tx_chan));
+
+	/* clear remaining state ram for rx & tx channel */
+	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));
+
+	/* set max rx/tx length */
+	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
+	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);
+
+	/* set dma maximum burst len */
+	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+			ENETDMA_MAXBURST_REG(priv->rx_chan));
+	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
+			ENETDMA_MAXBURST_REG(priv->tx_chan));
+
+	/* set correct transmit fifo watermark */
+	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);
+
+	/* set flow control low/high threshold to 1/3 / 2/3 */
+	val = priv->rx_ring_size / 3;
+	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
+	val = (priv->rx_ring_size * 2) / 3;
+	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));
+
+	/* all set, enable mac and interrupts, start dma engine and
+	 * kick rx dma channel */
+	wmb();
+	val = enet_readl(priv, ENET_CTL_REG);
+	val |= ENET_CTL_ENABLE_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
+	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
+			ENETDMA_CHANCFG_REG(priv->rx_chan));
+
+	/* watch "mib counters about to overflow" interrupt */
+	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
+	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
+
+	/* watch "packet transferred" interrupt in rx and tx */
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IR_REG(priv->rx_chan));
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IR_REG(priv->tx_chan));
+
+	/* make sure we enable napi before enabling the rx interrupt */
+	napi_enable(&priv->napi);
+
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IRMASK_REG(priv->rx_chan));
+	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
+			ENETDMA_IRMASK_REG(priv->tx_chan));
+
+	if (priv->has_phy)
+		phy_start(priv->phydev);
+	else
+		bcm_enet_adjust_link(dev);
+
+	netif_start_queue(dev);
+	return 0;
+
+out:
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+	kfree(priv->rx_skb);
+
+out_free_tx_skb:
+	kfree(priv->tx_skb);
+
+out_free_tx_ring:
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+
+out_free_rx_ring:
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+
+out_freeirq_tx:
+	free_irq(priv->irq_tx, dev);
+
+out_freeirq_rx:
+	free_irq(priv->irq_rx, dev);
+
+out_freeirq:
+	free_irq(dev->irq, dev);
+
+out_phy_disconnect:
+	phy_disconnect(priv->phydev);
+
+	return ret;
+}
+
+/*
+ * disable mac
+ */
+static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
+{
+	int limit;
+	u32 val;
+
+	val = enet_readl(priv, ENET_CTL_REG);
+	val |= ENET_CTL_DISABLE_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+
+	limit = 1000;
+	do {
+		u32 val;
+
+		val = enet_readl(priv, ENET_CTL_REG);
+		if (!(val & ENET_CTL_DISABLE_MASK))
+			break;
+		udelay(1);
+	} while (limit--);
+}
+
+/*
+ * disable dma in given channel
+ */
+static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan)
+{
+	int limit;
+
+	enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan));
+
+	limit = 1000;
+	do {
+		u32 val;
+
+		val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan));
+		if (!(val & ENETDMA_CHANCFG_EN_MASK))
+			break;
+		udelay(1);
+	} while (limit--);
+}
+
+/*
+ * stop callback
+ */
+static int bcm_enet_stop(struct net_device *dev)
+{
+	struct bcm_enet_priv *priv;
+	struct device *kdev;
+	int i;
+
+	priv = netdev_priv(dev);
+	kdev = &priv->pdev->dev;
+
+	netif_stop_queue(dev);
+	napi_disable(&priv->napi);
+	if (priv->has_phy)
+		phy_stop(priv->phydev);
+	del_timer_sync(&priv->rx_timeout);
+
+	/* mask all interrupts */
+	enet_writel(priv, 0, ENET_IRMASK_REG);
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
+	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
+
+	/* make sure no mib update is scheduled */
+	cancel_work_sync(&priv->mib_update_task);
+
+	/* disable dma & mac */
+	bcm_enet_disable_dma(priv, priv->tx_chan);
+	bcm_enet_disable_dma(priv, priv->rx_chan);
+	bcm_enet_disable_mac(priv);
+
+	/* force reclaim of all tx buffers */
+	bcm_enet_tx_reclaim(dev, 1);
+
+	/* free the rx skb ring */
+	for (i = 0; i < priv->rx_ring_size; i++) {
+		struct bcm_enet_desc *desc;
+
+		if (!priv->rx_skb[i])
+			continue;
+
+		desc = &priv->rx_desc_cpu[i];
+		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
+				 DMA_FROM_DEVICE);
+		kfree_skb(priv->rx_skb[i]);
+	}
+
+	/* free remaining allocated memory */
+	kfree(priv->rx_skb);
+	kfree(priv->tx_skb);
+	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
+			  priv->rx_desc_cpu, priv->rx_desc_dma);
+	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
+			  priv->tx_desc_cpu, priv->tx_desc_dma);
+	free_irq(priv->irq_tx, dev);
+	free_irq(priv->irq_rx, dev);
+	free_irq(dev->irq, dev);
+
+	/* release phy */
+	if (priv->has_phy) {
+		phy_disconnect(priv->phydev);
+		priv->phydev = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * ethtool callbacks
+ */
+struct bcm_enet_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+	int mib_reg;
+};
+
+#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m),		\
+		     offsetof(struct bcm_enet_priv, m)
+#define DEV_STAT(m) sizeof(((struct net_device_stats *)0)->m),		\
+		     offsetof(struct net_device_stats, m)
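+/* the (type *)0 casts are compile-time only: sizeof() and offsetof()
+ * just record how wide each counter is and where it lives, so a stats
+ * table entry can later be read generically from either structure */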
+
+static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
+	{ "rx_packets", DEV_STAT(rx_packets), -1 },
+	{ "tx_packets",	DEV_STAT(tx_packets), -1 },
+	{ "rx_bytes", DEV_STAT(rx_bytes), -1 },
+	{ "tx_bytes", DEV_STAT(tx_bytes), -1 },
+	{ "rx_errors", DEV_STAT(rx_errors), -1 },
+	{ "tx_errors", DEV_STAT(tx_errors), -1 },
+	{ "rx_dropped",	DEV_STAT(rx_dropped), -1 },
+	{ "tx_dropped",	DEV_STAT(tx_dropped), -1 },
+
+	{ "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS},
+	{ "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS },
+	{ "rx_broadcast", GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST },
+	{ "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT },
+	{ "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 },
+	{ "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 },
+	{ "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 },
+	{ "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 },
+	{ "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 },
+	{ "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX },
+	{ "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB },
+	{ "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR },
+	{ "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG },
+	{ "rx_dropped",	GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP },
+	{ "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN },
+	{ "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND },
+	{ "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC },
+	{ "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN },
+	{ "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM },
+	{ "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE },
+	{ "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL },
+
+	{ "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS },
+	{ "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS },
+	{ "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST },
+	{ "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT },
+	{ "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 },
+	{ "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 },
+	{ "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 },
+	{ "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 },
+	{ "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023},
+	{ "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX },
+	{ "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB },
+	{ "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR },
+	{ "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG },
+	{ "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN },
+	{ "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL },
+	{ "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL },
+	{ "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL },
+	{ "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL },
+	{ "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE },
+	{ "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF },
+	{ "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS },
+	{ "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE },
+
+};
+
+#define BCM_ENET_STATS_LEN	ARRAY_SIZE(bcm_enet_gstrings_stats)
+
+static const u32 unused_mib_regs[] = {
+	ETH_MIB_TX_ALL_OCTETS,
+	ETH_MIB_TX_ALL_PKTS,
+	ETH_MIB_RX_ALL_OCTETS,
+	ETH_MIB_RX_ALL_PKTS,
+};
+
+
+static void bcm_enet_get_drvinfo(struct net_device *netdev,
+				 struct ethtool_drvinfo *drvinfo)
+{
+	strncpy(drvinfo->driver, bcm_enet_driver_name, 32);
+	strncpy(drvinfo->version, bcm_enet_driver_version, 32);
+	strncpy(drvinfo->fw_version, "N/A", 32);
+	strncpy(drvinfo->bus_info, "bcm63xx", 32);
+	drvinfo->n_stats = BCM_ENET_STATS_LEN;
+}
+
+static int bcm_enet_get_sset_count(struct net_device *netdev,
+					int string_set)
+{
+	switch (string_set) {
+	case ETH_SS_STATS:
+		return BCM_ENET_STATS_LEN;
+	default:
+		return -EINVAL;
+	}
+}
+
+static void bcm_enet_get_strings(struct net_device *netdev,
+				 u32 stringset, u8 *data)
+{
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+			memcpy(data + i * ETH_GSTRING_LEN,
+			       bcm_enet_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+}
+
+static void update_mib_counters(struct bcm_enet_priv *priv)
+{
+	int i;
+
+	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		u32 val;
+		char *p;
+
+		s = &bcm_enet_gstrings_stats[i];
+		if (s->mib_reg == -1)
+			continue;
+
+		val = enet_readl(priv, ENET_MIB_REG(s->mib_reg));
+		p = (char *)priv + s->stat_offset;
+
+		if (s->sizeof_stat == sizeof(u64))
+			*(u64 *)p += val;
+		else
+			*(u32 *)p += val;
+	}
+
+	/* also empty unused mib counters to make sure mib counter
+	 * overflow interrupt is cleared */
+	for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++)
+		(void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i]));
+}
+
+static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = container_of(t, struct bcm_enet_priv, mib_update_task);
+	mutex_lock(&priv->mib_update_lock);
+	update_mib_counters(priv);
+	mutex_unlock(&priv->mib_update_lock);
+
+	/* reenable mib interrupt */
+	if (netif_running(priv->net_dev))
+		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
+}
+
+static void bcm_enet_get_ethtool_stats(struct net_device *netdev,
+				       struct ethtool_stats *stats,
+				       u64 *data)
+{
+	struct bcm_enet_priv *priv;
+	int i;
+
+	priv = netdev_priv(netdev);
+
+	mutex_lock(&priv->mib_update_lock);
+	update_mib_counters(priv);
+
+	for (i = 0; i < BCM_ENET_STATS_LEN; i++) {
+		const struct bcm_enet_stats *s;
+		char *p;
+
+		s = &bcm_enet_gstrings_stats[i];
+		if (s->mib_reg == -1)
+			p = (char *)&netdev->stats;
+		else
+			p = (char *)priv;
+		p += s->stat_offset;
+		data[i] = (s->sizeof_stat == sizeof(u64)) ?
+			*(u64 *)p : *(u32 *)p;
+	}
+	mutex_unlock(&priv->mib_update_lock);
+}
+
+static int bcm_enet_get_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	cmd->maxrxpkt = 0;
+	cmd->maxtxpkt = 0;
+
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return phy_ethtool_gset(priv->phydev, cmd);
+	} else {
+		cmd->autoneg = 0;
+		ethtool_cmd_speed_set(cmd, ((priv->force_speed_100)
+					    ? SPEED_100 : SPEED_10));
+		cmd->duplex = (priv->force_duplex_full) ?
+			DUPLEX_FULL : DUPLEX_HALF;
+		cmd->supported = SUPPORTED_10baseT_Half |
+			SUPPORTED_10baseT_Full |
+			SUPPORTED_100baseT_Half |
+			SUPPORTED_100baseT_Full;
+		cmd->advertising = 0;
+		cmd->port = PORT_MII;
+		cmd->transceiver = XCVR_EXTERNAL;
+	}
+	return 0;
+}
+
+static int bcm_enet_set_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return phy_ethtool_sset(priv->phydev, cmd);
+	} else {
+		if (cmd->autoneg ||
+		    (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) ||
+		    cmd->port != PORT_MII)
+			return -EINVAL;
+
+		priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0;
+		priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 1 : 0;
+
+		if (netif_running(dev))
+			bcm_enet_adjust_link(dev);
+		return 0;
+	}
+}
+
+static void bcm_enet_get_ringparam(struct net_device *dev,
+				   struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	/* rx/tx ring is actually only limited by memory */
+	ering->rx_max_pending = 8192;
+	ering->tx_max_pending = 8192;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->rx_pending = priv->rx_ring_size;
+	ering->tx_pending = priv->tx_ring_size;
+}
+
+static int bcm_enet_set_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *ering)
+{
+	struct bcm_enet_priv *priv;
+	int was_running;
+
+	priv = netdev_priv(dev);
+
+	was_running = 0;
+	if (netif_running(dev)) {
+		bcm_enet_stop(dev);
+		was_running = 1;
+	}
+
+	priv->rx_ring_size = ering->rx_pending;
+	priv->tx_ring_size = ering->tx_pending;
+
+	if (was_running) {
+		int err;
+
+		err = bcm_enet_open(dev);
+		if (err)
+			dev_close(dev);
+		else
+			bcm_enet_set_multicast_list(dev);
+	}
+	return 0;
+}
+
+static void bcm_enet_get_pauseparam(struct net_device *dev,
+				    struct ethtool_pauseparam *ecmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	ecmd->autoneg = priv->pause_auto;
+	ecmd->rx_pause = priv->pause_rx;
+	ecmd->tx_pause = priv->pause_tx;
+}
+
+static int bcm_enet_set_pauseparam(struct net_device *dev,
+				   struct ethtool_pauseparam *ecmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+
+	if (priv->has_phy) {
+		if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) {
+			/* asymmetric pause mode not supported;
+			 * actually possible, but the integrated PHY
+			 * has a read-only asym_pause bit */
+			return -EINVAL;
+		}
+	} else {
+		/* no pause autoneg on direct mii connection */
+		if (ecmd->autoneg)
+			return -EINVAL;
+	}
+
+	priv->pause_auto = ecmd->autoneg;
+	priv->pause_rx = ecmd->rx_pause;
+	priv->pause_tx = ecmd->tx_pause;
+
+	return 0;
+}
+
+static const struct ethtool_ops bcm_enet_ethtool_ops = {
+	.get_strings		= bcm_enet_get_strings,
+	.get_sset_count		= bcm_enet_get_sset_count,
+	.get_ethtool_stats      = bcm_enet_get_ethtool_stats,
+	.get_settings		= bcm_enet_get_settings,
+	.set_settings		= bcm_enet_set_settings,
+	.get_drvinfo		= bcm_enet_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+	.get_ringparam		= bcm_enet_get_ringparam,
+	.set_ringparam		= bcm_enet_set_ringparam,
+	.get_pauseparam		= bcm_enet_get_pauseparam,
+	.set_pauseparam		= bcm_enet_set_pauseparam,
+};
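+
+/* these callbacks back the standard ethtool commands; e.g. "ethtool -S"
+ * maps to get_ethtool_stats, "ethtool -g" to get_ringparam and
+ * "ethtool -A" to set_pauseparam */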
+
+static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct bcm_enet_priv *priv;
+
+	priv = netdev_priv(dev);
+	if (priv->has_phy) {
+		if (!priv->phydev)
+			return -ENODEV;
+		return phy_mii_ioctl(priv->phydev, rq, cmd);
+	} else {
+		struct mii_if_info mii;
+
+		mii.dev = dev;
+		mii.mdio_read = bcm_enet_mdio_read_mii;
+		mii.mdio_write = bcm_enet_mdio_write_mii;
+		mii.phy_id = 0;
+		mii.phy_id_mask = 0x3f;
+		mii.reg_num_mask = 0x1f;
+		return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL);
+	}
+}
+
+/*
+ * calculate actual hardware mtu
+ */
+static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu)
+{
+	int actual_mtu;
+
+	actual_mtu = mtu;
+
+	/* add ethernet header + vlan tag size */
+	actual_mtu += VLAN_ETH_HLEN;
+
+	if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU)
+		return -EINVAL;
+
+	/*
+	 * setup maximum size before we get overflow mark in
+	 * descriptor, note that this will not prevent reception of
+	 * big frames, they will be split into multiple buffers
+	 * anyway
+	 */
+	priv->hw_mtu = actual_mtu;
+
+	/*
+	 * align rx buffer size to dma burst len, account FCS since
+	 * it's appended
+	 */
+	priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN,
+				  BCMENET_DMA_MAXBURST * 4);
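+	/* e.g. with the default 1500 byte mtu: 1500 + 18 (VLAN_ETH_HLEN)
+	 * = 1518, plus 4 bytes of FCS = 1522, rounded up to the 64 byte
+	 * (BCMENET_DMA_MAXBURST * 4) boundary gives a 1536 byte buffer */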
+	return 0;
+}
+
+/*
+ * adjust mtu, can't be called while device is running
+ */
+static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	ret = compute_hw_mtu(netdev_priv(dev), new_mtu);
+	if (ret)
+		return ret;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/*
+ * preinit hardware to allow mii operation while device is down
+ */
+static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
+{
+	u32 val;
+	int limit;
+
+	/* make sure mac is disabled */
+	bcm_enet_disable_mac(priv);
+
+	/* soft reset mac */
+	val = ENET_CTL_SRESET_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+	wmb();
+
+	limit = 1000;
+	do {
+		val = enet_readl(priv, ENET_CTL_REG);
+		if (!(val & ENET_CTL_SRESET_MASK))
+			break;
+		udelay(1);
+	} while (limit--);
+
+	/* select correct mii interface */
+	val = enet_readl(priv, ENET_CTL_REG);
+	if (priv->use_external_mii)
+		val |= ENET_CTL_EPHYSEL_MASK;
+	else
+		val &= ~ENET_CTL_EPHYSEL_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
+
+	/* turn on mdc clock */
+	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
+		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);
+
+	/* set mib counters to self-clear when read */
+	val = enet_readl(priv, ENET_MIBCTL_REG);
+	val |= ENET_MIBCTL_RDCLEAR_MASK;
+	enet_writel(priv, val, ENET_MIBCTL_REG);
+}
+
+static const struct net_device_ops bcm_enet_ops = {
+	.ndo_open		= bcm_enet_open,
+	.ndo_stop		= bcm_enet_stop,
+	.ndo_start_xmit		= bcm_enet_start_xmit,
+	.ndo_set_mac_address	= bcm_enet_set_mac_address,
+	.ndo_set_multicast_list = bcm_enet_set_multicast_list,
+	.ndo_do_ioctl		= bcm_enet_ioctl,
+	.ndo_change_mtu		= bcm_enet_change_mtu,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller = bcm_enet_netpoll,
+#endif
+};
+
+/*
+ * allocate netdevice, request register memory and register device.
+ */
+static int __devinit bcm_enet_probe(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	struct bcm63xx_enet_platform_data *pd;
+	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
+	struct mii_bus *bus;
+	const char *clk_name;
+	unsigned int iomem_size;
+	int i, ret;
+
+	/* stop if the shared driver failed; assume driver->probe will be
+	 * called in the same order we registered the devices (correct?) */
+	if (!bcm_enet_shared_base)
+		return -ENODEV;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
+	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
+		return -ENODEV;
+
+	ret = 0;
+	dev = alloc_etherdev(sizeof(*priv));
+	if (!dev)
+		return -ENOMEM;
+	priv = netdev_priv(dev);
+
+	ret = compute_hw_mtu(priv, dev->mtu);
+	if (ret)
+		goto out;
+
+	iomem_size = resource_size(res_mem);
+	if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	priv->base = ioremap(res_mem->start, iomem_size);
+	if (priv->base == NULL) {
+		ret = -ENOMEM;
+		goto out_release_mem;
+	}
+	dev->irq = priv->irq = res_irq->start;
+	priv->irq_rx = res_irq_rx->start;
+	priv->irq_tx = res_irq_tx->start;
+	priv->mac_id = pdev->id;
+
+	/* get rx & tx dma channel id for this mac */
+	if (priv->mac_id == 0) {
+		priv->rx_chan = 0;
+		priv->tx_chan = 1;
+		clk_name = "enet0";
+	} else {
+		priv->rx_chan = 2;
+		priv->tx_chan = 3;
+		clk_name = "enet1";
+	}
+
+	priv->mac_clk = clk_get(&pdev->dev, clk_name);
+	if (IS_ERR(priv->mac_clk)) {
+		ret = PTR_ERR(priv->mac_clk);
+		goto out_unmap;
+	}
+	clk_enable(priv->mac_clk);
+
+	/* initialize default and fetch platform data */
+	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
+	priv->tx_ring_size = BCMENET_DEF_TX_DESC;
+
+	pd = pdev->dev.platform_data;
+	if (pd) {
+		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+		priv->has_phy = pd->has_phy;
+		priv->phy_id = pd->phy_id;
+		priv->has_phy_interrupt = pd->has_phy_interrupt;
+		priv->phy_interrupt = pd->phy_interrupt;
+		priv->use_external_mii = !pd->use_internal_phy;
+		priv->pause_auto = pd->pause_auto;
+		priv->pause_rx = pd->pause_rx;
+		priv->pause_tx = pd->pause_tx;
+		priv->force_duplex_full = pd->force_duplex_full;
+		priv->force_speed_100 = pd->force_speed_100;
+	}
+
+	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
+		/* using internal PHY, enable clock */
+		priv->phy_clk = clk_get(&pdev->dev, "ephy");
+		if (IS_ERR(priv->phy_clk)) {
+			ret = PTR_ERR(priv->phy_clk);
+			priv->phy_clk = NULL;
+			goto out_put_clk_mac;
+		}
+		clk_enable(priv->phy_clk);
+	}
+
+	/* do minimal hardware init to be able to probe mii bus */
+	bcm_enet_hw_preinit(priv);
+
+	/* MII bus registration */
+	if (priv->has_phy) {
+		priv->mii_bus = mdiobus_alloc();
+		if (!priv->mii_bus) {
+			ret = -ENOMEM;
+			goto out_uninit_hw;
+		}
+
+		bus = priv->mii_bus;
+		bus->name = "bcm63xx_enet MII bus";
+		bus->parent = &pdev->dev;
+		bus->priv = priv;
+		bus->read = bcm_enet_mdio_read_phylib;
+		bus->write = bcm_enet_mdio_write_phylib;
+		sprintf(bus->id, "%d", priv->mac_id);
+
+		/* only probe the bus where we think the PHY is, because
+		 * the mdio read operation returns 0 instead of 0xffff
+		 * if a slave is not present on hw */
+		bus->phy_mask = ~(1 << priv->phy_id);
+
+		bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+		if (!bus->irq) {
+			ret = -ENOMEM;
+			goto out_free_mdio;
+		}
+
+		if (priv->has_phy_interrupt)
+			bus->irq[priv->phy_id] = priv->phy_interrupt;
+		else
+			bus->irq[priv->phy_id] = PHY_POLL;
+
+		ret = mdiobus_register(bus);
+		if (ret) {
+			dev_err(&pdev->dev, "unable to register mdio bus\n");
+			goto out_free_mdio;
+		}
+	} else {
+		/* run platform code to initialize PHY device */
+		if (pd && pd->mii_config &&
+		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
+				   bcm_enet_mdio_write_mii)) {
+			dev_err(&pdev->dev, "unable to configure mdio bus\n");
+			ret = -ENODEV;
+			goto out_uninit_hw;
+		}
+	}
+
+	spin_lock_init(&priv->rx_lock);
+
+	/* init rx timeout (used for oom) */
+	init_timer(&priv->rx_timeout);
+	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
+	priv->rx_timeout.data = (unsigned long)dev;
+
+	/* init the mib update lock&work */
+	mutex_init(&priv->mib_update_lock);
+	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);
+
+	/* zero mib counters */
+	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
+		enet_writel(priv, 0, ENET_MIB_REG(i));
+
+	/* register netdevice */
+	dev->netdev_ops = &bcm_enet_ops;
+	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
+
+	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	ret = register_netdev(dev);
+	if (ret)
+		goto out_unregister_mdio;
+
+	netif_carrier_off(dev);
+	platform_set_drvdata(pdev, dev);
+	priv->pdev = pdev;
+	priv->net_dev = dev;
+
+	return 0;
+
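+	/* error path: release resources in the reverse order they were
+	 * acquired */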
+out_unregister_mdio:
+	if (priv->mii_bus) {
+		mdiobus_unregister(priv->mii_bus);
+		kfree(priv->mii_bus->irq);
+	}
+
+out_free_mdio:
+	if (priv->mii_bus)
+		mdiobus_free(priv->mii_bus);
+
+out_uninit_hw:
+	/* turn off mdc clock */
+	enet_writel(priv, 0, ENET_MIISC_REG);
+	if (priv->phy_clk) {
+		clk_disable(priv->phy_clk);
+		clk_put(priv->phy_clk);
+	}
+
+out_put_clk_mac:
+	clk_disable(priv->mac_clk);
+	clk_put(priv->mac_clk);
+
+out_unmap:
+	iounmap(priv->base);
+
+out_release_mem:
+	release_mem_region(res_mem->start, iomem_size);
+out:
+	free_netdev(dev);
+	return ret;
+}
+
+
+/*
+ * exit func, stops hardware and unregisters netdevice
+ */
+static int __devexit bcm_enet_remove(struct platform_device *pdev)
+{
+	struct bcm_enet_priv *priv;
+	struct net_device *dev;
+	struct resource *res;
+
+	/* stop netdevice */
+	dev = platform_get_drvdata(pdev);
+	priv = netdev_priv(dev);
+	unregister_netdev(dev);
+
+	/* turn off mdc clock */
+	enet_writel(priv, 0, ENET_MIISC_REG);
+
+	if (priv->has_phy) {
+		mdiobus_unregister(priv->mii_bus);
+		kfree(priv->mii_bus->irq);
+		mdiobus_free(priv->mii_bus);
+	} else {
+		struct bcm63xx_enet_platform_data *pd;
+
+		pd = pdev->dev.platform_data;
+		if (pd && pd->mii_config)
+			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
+				       bcm_enet_mdio_write_mii);
+	}
+
+	/* release device resources */
+	iounmap(priv->base);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+
+	/* disable hw block clocks */
+	if (priv->phy_clk) {
+		clk_disable(priv->phy_clk);
+		clk_put(priv->phy_clk);
+	}
+	clk_disable(priv->mac_clk);
+	clk_put(priv->mac_clk);
+
+	platform_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+	return 0;
+}
+
+struct platform_driver bcm63xx_enet_driver = {
+	.probe	= bcm_enet_probe,
+	.remove	= __devexit_p(bcm_enet_remove),
+	.driver	= {
+		.name	= "bcm63xx_enet",
+		.owner  = THIS_MODULE,
+	},
+};
+
+/*
+ * reserve & remap memory space shared between all macs
+ */
+static int __devinit bcm_enet_shared_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	unsigned int iomem_size;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	iomem_size = resource_size(res);
+	if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma"))
+		return -EBUSY;
+
+	bcm_enet_shared_base = ioremap(res->start, iomem_size);
+	if (!bcm_enet_shared_base) {
+		release_mem_region(res->start, iomem_size);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int __devexit bcm_enet_shared_remove(struct platform_device *pdev)
+{
+	struct resource *res;
+
+	iounmap(bcm_enet_shared_base);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, resource_size(res));
+	return 0;
+}
+
+/*
+ * this "shared" driver is needed because both macs share a single
+ * address space
+ */
+struct platform_driver bcm63xx_enet_shared_driver = {
+	.probe	= bcm_enet_shared_probe,
+	.remove	= __devexit_p(bcm_enet_shared_remove),
+	.driver	= {
+		.name	= "bcm63xx_enet_shared",
+		.owner  = THIS_MODULE,
+	},
+};
+
+/*
+ * entry point
+ */
+static int __init bcm_enet_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&bcm63xx_enet_shared_driver);
+	if (ret)
+		return ret;
+
+	ret = platform_driver_register(&bcm63xx_enet_driver);
+	if (ret)
+		platform_driver_unregister(&bcm63xx_enet_shared_driver);
+
+	return ret;
+}
+
+static void __exit bcm_enet_exit(void)
+{
+	platform_driver_unregister(&bcm63xx_enet_driver);
+	platform_driver_unregister(&bcm63xx_enet_shared_driver);
+}
+
+
+module_init(bcm_enet_init);
+module_exit(bcm_enet_exit);
+
+MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.h b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
new file mode 100644
index 0000000..0e3048b
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h
@@ -0,0 +1,302 @@
+#ifndef BCM63XX_ENET_H_
+#define BCM63XX_ENET_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+#include <linux/mutex.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#include <bcm63xx_regs.h>
+#include <bcm63xx_irq.h>
+#include <bcm63xx_io.h>
+
+/* default number of descriptors */
+#define BCMENET_DEF_RX_DESC	64
+#define BCMENET_DEF_TX_DESC	32
+
+/* maximum burst len for dma (in units of 4 bytes) */
+#define BCMENET_DMA_MAXBURST	16
+
+/* tx transmit threshold (in units of 4 bytes); the fifo is 256 bytes,
+ * so the value must be low enough that a DMA transfer of the above
+ * burst length cannot overflow the fifo */
+#define BCMENET_TX_FIFO_TRESH	32
+
+/*
+ * hardware maximum rx/tx packet size including FCS: the max mtu is
+ * actually 2047, but if we set the max rx size register to 2047 we
+ * won't get overflow information for packets of 2048 bytes or more
+ */
+#define BCMENET_MAX_MTU		2046
+
+/*
+ * rx/tx dma descriptor
+ */
+struct bcm_enet_desc {
+	u32 len_stat;
+	u32 address;
+};
+
+#define DMADESC_LENGTH_SHIFT	16
+#define DMADESC_LENGTH_MASK	(0xfff << DMADESC_LENGTH_SHIFT)
+#define DMADESC_OWNER_MASK	(1 << 15)
+#define DMADESC_EOP_MASK	(1 << 14)
+#define DMADESC_SOP_MASK	(1 << 13)
+#define DMADESC_ESOP_MASK	(DMADESC_EOP_MASK | DMADESC_SOP_MASK)
+#define DMADESC_WRAP_MASK	(1 << 12)
+
+#define DMADESC_UNDER_MASK	(1 << 9)
+#define DMADESC_APPEND_CRC	(1 << 8)
+#define DMADESC_OVSIZE_MASK	(1 << 4)
+#define DMADESC_RXER_MASK	(1 << 2)
+#define DMADESC_CRC_MASK	(1 << 1)
+#define DMADESC_OV_MASK		(1 << 0)
+#define DMADESC_ERR_MASK	(DMADESC_UNDER_MASK | \
+				DMADESC_OVSIZE_MASK | \
+				DMADESC_RXER_MASK | \
+				DMADESC_CRC_MASK | \
+				DMADESC_OV_MASK)
+
+
+/*
+ * MIB Counters register definitions
+ */
+#define ETH_MIB_TX_GD_OCTETS			0
+#define ETH_MIB_TX_GD_PKTS			1
+#define ETH_MIB_TX_ALL_OCTETS			2
+#define ETH_MIB_TX_ALL_PKTS			3
+#define ETH_MIB_TX_BRDCAST			4
+#define ETH_MIB_TX_MULT				5
+#define ETH_MIB_TX_64				6
+#define ETH_MIB_TX_65_127			7
+#define ETH_MIB_TX_128_255			8
+#define ETH_MIB_TX_256_511			9
+#define ETH_MIB_TX_512_1023			10
+#define ETH_MIB_TX_1024_MAX			11
+#define ETH_MIB_TX_JAB				12
+#define ETH_MIB_TX_OVR				13
+#define ETH_MIB_TX_FRAG				14
+#define ETH_MIB_TX_UNDERRUN			15
+#define ETH_MIB_TX_COL				16
+#define ETH_MIB_TX_1_COL			17
+#define ETH_MIB_TX_M_COL			18
+#define ETH_MIB_TX_EX_COL			19
+#define ETH_MIB_TX_LATE				20
+#define ETH_MIB_TX_DEF				21
+#define ETH_MIB_TX_CRS				22
+#define ETH_MIB_TX_PAUSE			23
+
+#define ETH_MIB_RX_GD_OCTETS			32
+#define ETH_MIB_RX_GD_PKTS			33
+#define ETH_MIB_RX_ALL_OCTETS			34
+#define ETH_MIB_RX_ALL_PKTS			35
+#define ETH_MIB_RX_BRDCAST			36
+#define ETH_MIB_RX_MULT				37
+#define ETH_MIB_RX_64				38
+#define ETH_MIB_RX_65_127			39
+#define ETH_MIB_RX_128_255			40
+#define ETH_MIB_RX_256_511			41
+#define ETH_MIB_RX_512_1023			42
+#define ETH_MIB_RX_1024_MAX			43
+#define ETH_MIB_RX_JAB				44
+#define ETH_MIB_RX_OVR				45
+#define ETH_MIB_RX_FRAG				46
+#define ETH_MIB_RX_DROP				47
+#define ETH_MIB_RX_CRC_ALIGN			48
+#define ETH_MIB_RX_UND				49
+#define ETH_MIB_RX_CRC				50
+#define ETH_MIB_RX_ALIGN			51
+#define ETH_MIB_RX_SYM				52
+#define ETH_MIB_RX_PAUSE			53
+#define ETH_MIB_RX_CNTRL			54
+
+
+struct bcm_enet_mib_counters {
+	u64 tx_gd_octets;
+	u32 tx_gd_pkts;
+	u32 tx_all_octets;
+	u32 tx_all_pkts;
+	u32 tx_brdcast;
+	u32 tx_mult;
+	u32 tx_64;
+	u32 tx_65_127;
+	u32 tx_128_255;
+	u32 tx_256_511;
+	u32 tx_512_1023;
+	u32 tx_1024_max;
+	u32 tx_jab;
+	u32 tx_ovr;
+	u32 tx_frag;
+	u32 tx_underrun;
+	u32 tx_col;
+	u32 tx_1_col;
+	u32 tx_m_col;
+	u32 tx_ex_col;
+	u32 tx_late;
+	u32 tx_def;
+	u32 tx_crs;
+	u32 tx_pause;
+	u64 rx_gd_octets;
+	u32 rx_gd_pkts;
+	u32 rx_all_octets;
+	u32 rx_all_pkts;
+	u32 rx_brdcast;
+	u32 rx_mult;
+	u32 rx_64;
+	u32 rx_65_127;
+	u32 rx_128_255;
+	u32 rx_256_511;
+	u32 rx_512_1023;
+	u32 rx_1024_max;
+	u32 rx_jab;
+	u32 rx_ovr;
+	u32 rx_frag;
+	u32 rx_drop;
+	u32 rx_crc_align;
+	u32 rx_und;
+	u32 rx_crc;
+	u32 rx_align;
+	u32 rx_sym;
+	u32 rx_pause;
+	u32 rx_cntrl;
+};
+
+
+struct bcm_enet_priv {
+
+	/* mac id (from platform device id) */
+	int mac_id;
+
+	/* base remapped address of device */
+	void __iomem *base;
+
+	/* mac irq, rx_dma irq, tx_dma irq */
+	int irq;
+	int irq_rx;
+	int irq_tx;
+
+	/* hw view of rx & tx dma ring */
+	dma_addr_t rx_desc_dma;
+	dma_addr_t tx_desc_dma;
+
+	/* allocated size (in bytes) for rx & tx dma ring */
+	unsigned int rx_desc_alloc_size;
+	unsigned int tx_desc_alloc_size;
+
+
+	struct napi_struct napi;
+
+	/* dma channel id for rx */
+	int rx_chan;
+
+	/* number of dma desc in rx ring */
+	int rx_ring_size;
+
+	/* cpu view of rx dma ring */
+	struct bcm_enet_desc *rx_desc_cpu;
+
+	/* current number of armed descriptors given to hardware for rx */
+	int rx_desc_count;
+
+	/* next rx descriptor to fetch from hardware */
+	int rx_curr_desc;
+
+	/* next dirty rx descriptor to refill */
+	int rx_dirty_desc;
+
+	/* size of allocated rx skbs */
+	unsigned int rx_skb_size;
+
+	/* list of skbs given to hw for rx */
+	struct sk_buff **rx_skb;
+
+	/* used when rx skb allocation fails, so we can defer the rx
+	 * queue refill */
+	struct timer_list rx_timeout;
+
+	/* lock rx_timeout against rx normal operation */
+	spinlock_t rx_lock;
+
+
+	/* dma channel id for tx */
+	int tx_chan;
+
+	/* number of dma desc in tx ring */
+	int tx_ring_size;
+
+	/* cpu view of tx dma ring */
+	struct bcm_enet_desc *tx_desc_cpu;
+
+	/* number of available descriptor for tx */
+	int tx_desc_count;
+
+	/* next tx descriptor available */
+	int tx_curr_desc;
+
+	/* next dirty tx descriptor to reclaim */
+	int tx_dirty_desc;
+
+	/* list of skbs given to hw for tx */
+	struct sk_buff **tx_skb;
+
+	/* lock used by tx reclaim and xmit */
+	spinlock_t tx_lock;
+
+
+	/* set if internal phy is ignored and external mii interface
+	 * is selected */
+	int use_external_mii;
+
+	/* set if a phy is connected, phy address must be known,
+	 * probing is not possible */
+	int has_phy;
+	int phy_id;
+
+	/* set if connected phy has an associated irq */
+	int has_phy_interrupt;
+	int phy_interrupt;
+
+	/* used when a phy is connected (phylib used) */
+	struct mii_bus *mii_bus;
+	struct phy_device *phydev;
+	int old_link;
+	int old_duplex;
+	int old_pause;
+
+	/* used when no phy is connected */
+	int force_speed_100;
+	int force_duplex_full;
+
+	/* pause parameters */
+	int pause_auto;
+	int pause_rx;
+	int pause_tx;
+
+	/* stats */
+	struct bcm_enet_mib_counters mib;
+
+	/* after a mib interrupt, the mib register update is done in
+	 * this workqueue */
+	struct work_struct mib_update_task;
+
+	/* lock mib update between userspace request and workqueue */
+	struct mutex mib_update_lock;
+
+	/* mac clock */
+	struct clk *mac_clk;
+
+	/* phy clock if internal phy is used */
+	struct clk *phy_clk;
+
+	/* network device reference */
+	struct net_device *net_dev;
+
+	/* platform device reference */
+	struct platform_device *pdev;
+
+	/* maximum hardware transmit/receive size */
+	unsigned int hw_mtu;
+};
+
+#endif /* ! BCM63XX_ENET_H_ */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
new file mode 100644
index 0000000..4b2b570
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -0,0 +1,8607 @@
+/* bnx2.c: Broadcom NX2 network driver.
+ *
+ * Copyright (c) 2004-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Michael Chan  (mchan@broadcom.com)
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <asm/page.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/prefetch.h>
+#include <linux/cache.h>
+#include <linux/firmware.h>
+#include <linux/log2.h>
+#include <linux/aer.h>
+
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
+#include "bnx2.h"
+#include "bnx2_fw.h"
+
+#define DRV_MODULE_NAME		"bnx2"
+#define DRV_MODULE_VERSION	"2.1.11"
+#define DRV_MODULE_RELDATE	"July 20, 2011"
+#define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.2.1.fw"
+#define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
+#define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_RV2P_FILE_09_Ax	"bnx2/bnx2-rv2p-09ax-6.0.17.fw"
+#define FW_RV2P_FILE_09		"bnx2/bnx2-rv2p-09-6.0.17.fw"
+
+#define RUN_AT(x) (jiffies + (x))
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (5*HZ)
+
+static char version[] __devinitdata =
+	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_MIPS_FILE_06);
+MODULE_FIRMWARE(FW_RV2P_FILE_06);
+MODULE_FIRMWARE(FW_MIPS_FILE_09);
+MODULE_FIRMWARE(FW_RV2P_FILE_09);
+MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
+
+static int disable_msi;
+
+module_param(disable_msi, int, 0);
+MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
+
+typedef enum {
+	BCM5706 = 0,
+	NC370T,
+	NC370I,
+	BCM5706S,
+	NC370F,
+	BCM5708,
+	BCM5708S,
+	BCM5709,
+	BCM5709S,
+	BCM5716,
+	BCM5716S,
+} board_t;
+
+/* indexed by board_t, above */
+static struct {
+	char *name;
+} board_info[] __devinitdata = {
+	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
+	{ "HP NC370T Multifunction Gigabit Server Adapter" },
+	{ "HP NC370i Multifunction Gigabit Server Adapter" },
+	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
+	{ "HP NC370F Multifunction Gigabit Server Adapter" },
+	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
+	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
+	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
+	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
+	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
+	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
+};
+
+static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
+	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
+	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
+	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
+	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
+	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
+	{ 0, }
+};
+
+static const struct flash_spec flash_table[] = {
+#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
+#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
+	/* Slow EEPROM */
+	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
+	 "EEPROM - slow"},
+	/* Expansion entry 0001 */
+	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 0001"},
+	/* Saifun SA25F010 (non-buffered flash) */
+	/* strap, cfg1, & write1 need updates */
+	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
+	 "Non-buffered flash (128kB)"},
+	/* Saifun SA25F020 (non-buffered flash) */
+	/* strap, cfg1, & write1 need updates */
+	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
+	 "Non-buffered flash (256kB)"},
+	/* Expansion entry 0100 */
+	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 0100"},
+	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
+	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
+	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
+	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
+	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
+	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+	/* Saifun SA25F005 (non-buffered flash) */
+	/* strap, cfg1, & write1 need updates */
+	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
+	 "Non-buffered flash (64kB)"},
+	/* Fast EEPROM */
+	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
+	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
+	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
+	 "EEPROM - fast"},
+	/* Expansion entry 1001 */
+	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1001"},
+	/* Expansion entry 1010 */
+	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1010"},
+	/* ATMEL AT45DB011B (buffered flash) */
+	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
+	 "Buffered flash (128kB)"},
+	/* Expansion entry 1100 */
+	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1100"},
+	/* Expansion entry 1101 */
+	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
+	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
+	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1101"},
+	/* Atmel Expansion entry 1110 */
+	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
+	 "Entry 1110 (Atmel)"},
+	/* ATMEL AT45DB021B (buffered flash) */
+	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
+	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
+	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
+	 "Buffered flash (256kB)"},
+};
+
+static const struct flash_spec flash_5709 = {
+	.flags		= BNX2_NV_BUFFERED,
+	.page_bits	= BCM5709_FLASH_PAGE_BITS,
+	.page_size	= BCM5709_FLASH_PAGE_SIZE,
+	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
+	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
+	.name		= "5709 Buffered flash (256kB)",
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
+
+static void bnx2_init_napi(struct bnx2 *bp);
+static void bnx2_del_napi(struct bnx2 *bp);
+
+static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
+{
+	u32 diff;
+
+	/* Tell compiler to fetch tx_prod and tx_cons from memory. */
+	barrier();
+
+	/* The ring uses 256 indices for 255 entries, one of them
+	 * needs to be skipped.
+	 */
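+	/* e.g. once the producer has run a full ring ahead of the
+	 * consumer, the 16-bit difference can read as TX_DESC_CNT and
+	 * is clamped to MAX_TX_DESC_CNT to account for the skipped
+	 * index */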
+	diff = txr->tx_prod - txr->tx_cons;
+	if (unlikely(diff >= TX_DESC_CNT)) {
+		diff &= 0xffff;
+		if (diff == TX_DESC_CNT)
+			diff = MAX_TX_DESC_CNT;
+	}
+	return bp->tx_ring_size - diff;
+}
+
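+/* indirect register access: the target offset is latched into the
+ * register window address and the data is then moved through the
+ * window itself, with indirect_lock serializing the two-step sequence */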
+static u32
+bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
+{
+	u32 val;
+
+	spin_lock_bh(&bp->indirect_lock);
+	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
+	spin_unlock_bh(&bp->indirect_lock);
+	return val;
+}
+
+static void
+bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
+{
+	spin_lock_bh(&bp->indirect_lock);
+	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
+	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
+	spin_unlock_bh(&bp->indirect_lock);
+}
+
+static void
+bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
+{
+	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
+}
+
+static u32
+bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
+{
+	return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
+}
+
+static void
+bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
+{
+	offset += cid_addr;
+	spin_lock_bh(&bp->indirect_lock);
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		int i;
+
+		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
+		REG_WR(bp, BNX2_CTX_CTX_CTRL,
+		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
+		for (i = 0; i < 5; i++) {
+			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
+			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
+				break;
+			udelay(5);
+		}
+	} else {
+		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
+		REG_WR(bp, BNX2_CTX_DATA, val);
+	}
+	spin_unlock_bh(&bp->indirect_lock);
+}
+
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct drv_ctl_io *io = &info->data.io;
+
+	switch (info->cmd) {
+	case DRV_CTL_IO_WR_CMD:
+		bnx2_reg_wr_ind(bp, io->offset, io->data);
+		break;
+	case DRV_CTL_IO_RD_CMD:
+		io->data = bnx2_reg_rd_ind(bp, io->offset);
+		break;
+	case DRV_CTL_CTX_WR_CMD:
+		bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	int sb_id;
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_present = 0;
+		sb_id = bp->irq_nvecs;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		bnapi->cnic_tag = bnapi->last_status_idx;
+		bnapi->cnic_present = 1;
+		sb_id = 0;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+
+	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+	cp->irq_arr[0].status_blk = (void *)
+		((unsigned long) bnapi->status_blk.msi +
+		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+	cp->irq_arr[0].status_blk_num = sb_id;
+	cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			      void *data)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	if (cp->drv_state & CNIC_DRV_STATE_REGD)
+		return -EBUSY;
+
+	if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
+		return -ENODEV;
+
+	bp->cnic_data = data;
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	cp->num_irq = 0;
+	cp->drv_state = CNIC_DRV_STATE_REGD;
+
+	bnx2_setup_cnic_irq_info(bp);
+
+	return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	mutex_lock(&bp->cnic_lock);
+	cp->drv_state = 0;
+	bnapi->cnic_present = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_lock);
+	synchronize_rcu();
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (!cp->max_iscsi_conn)
+		return NULL;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = bp->chip_id;
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->drv_ctl = bnx2_drv_ctl;
+	cp->drv_register_cnic = bnx2_register_cnic;
+	cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+	return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	mutex_lock(&bp->cnic_lock);
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
+	if (c_ops) {
+		info.cmd = CNIC_CTL_STOP_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	mutex_unlock(&bp->cnic_lock);
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+	struct cnic_ops *c_ops;
+	struct cnic_ctl_info info;
+
+	mutex_lock(&bp->cnic_lock);
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_lock));
+	if (c_ops) {
+		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+			bnapi->cnic_tag = bnapi->last_status_idx;
+		}
+		info.cmd = CNIC_CTL_START_CMD;
+		c_ops->cnic_ctl(bp->cnic_data, &info);
+	}
+	mutex_unlock(&bp->cnic_lock);
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
+
+static int
+bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
+{
+	u32 val1;
+	int i, ret;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	val1 = (bp->phy_addr << 21) | (reg << 16) |
+		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
+		BNX2_EMAC_MDIO_COMM_START_BUSY;
+	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+
+			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+			val1 &= BNX2_EMAC_MDIO_COMM_DATA;
+
+			break;
+		}
+	}
+
+	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
+		*val = 0x0;
+		ret = -EBUSY;
+	} else {
+		*val = val1;
+		ret = 0;
+	}
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	return ret;
+}
+
+static int
+bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
+{
+	u32 val1;
+	int i, ret;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
+		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
+		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
+	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
+		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+
+	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
+		ret = -EBUSY;
+	else
+		ret = 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
+		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
+
+		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
+		REG_RD(bp, BNX2_EMAC_MDIO_MODE);
+
+		udelay(40);
+	}
+
+	return ret;
+}
+
+static void
+bnx2_disable_int(struct bnx2 *bp)
+{
+	int i;
+	struct bnx2_napi *bnapi;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		bnapi = &bp->bnx2_napi[i];
+		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	}
+	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+}
+
+static void
+bnx2_enable_int(struct bnx2 *bp)
+{
+	int i;
+	struct bnx2_napi *bnapi;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		bnapi = &bp->bnx2_napi[i];
+
+		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+		       bnapi->last_status_idx);
+
+		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+		       bnapi->last_status_idx);
+	}
+	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+}
+
+static void
+bnx2_disable_int_sync(struct bnx2 *bp)
+{
+	int i;
+
+	atomic_inc(&bp->intr_sem);
+	if (!netif_running(bp->dev))
+		return;
+
+	bnx2_disable_int(bp);
+	for (i = 0; i < bp->irq_nvecs; i++)
+		synchronize_irq(bp->irq_tbl[i].vector);
+}
+
+static void
+bnx2_napi_disable(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		napi_disable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_napi_enable(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		napi_enable(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
+{
+	if (stop_cnic)
+		bnx2_cnic_stop(bp);
+	if (netif_running(bp->dev)) {
+		bnx2_napi_disable(bp);
+		netif_tx_disable(bp->dev);
+	}
+	bnx2_disable_int_sync(bp);
+	netif_carrier_off(bp->dev);	/* prevent tx timeout */
+}
+
+static void
+bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
+{
+	if (atomic_dec_and_test(&bp->intr_sem)) {
+		if (netif_running(bp->dev)) {
+			netif_tx_wake_all_queues(bp->dev);
+			spin_lock_bh(&bp->phy_lock);
+			if (bp->link_up)
+				netif_carrier_on(bp->dev);
+			spin_unlock_bh(&bp->phy_lock);
+			bnx2_napi_enable(bp);
+			bnx2_enable_int(bp);
+			if (start_cnic)
+				bnx2_cnic_start(bp);
+		}
+	}
+}
+
+static void
+bnx2_free_tx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+		if (txr->tx_desc_ring) {
+			dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+					  txr->tx_desc_ring,
+					  txr->tx_desc_mapping);
+			txr->tx_desc_ring = NULL;
+		}
+		kfree(txr->tx_buf_ring);
+		txr->tx_buf_ring = NULL;
+	}
+}
+
+static void
+bnx2_free_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			if (rxr->rx_desc_ring[j])
+				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+						  rxr->rx_desc_ring[j],
+						  rxr->rx_desc_mapping[j]);
+			rxr->rx_desc_ring[j] = NULL;
+		}
+		vfree(rxr->rx_buf_ring);
+		rxr->rx_buf_ring = NULL;
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			if (rxr->rx_pg_desc_ring[j])
+				dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+						  rxr->rx_pg_desc_ring[j],
+						  rxr->rx_pg_desc_mapping[j]);
+			rxr->rx_pg_desc_ring[j] = NULL;
+		}
+		vfree(rxr->rx_pg_ring);
+		rxr->rx_pg_ring = NULL;
+	}
+}
+
+static int
+bnx2_alloc_tx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+
+		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
+		if (txr->tx_buf_ring == NULL)
+			return -ENOMEM;
+
+		txr->tx_desc_ring =
+			dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+					   &txr->tx_desc_mapping, GFP_KERNEL);
+		if (txr->tx_desc_ring == NULL)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static int
+bnx2_alloc_rx_mem(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		rxr->rx_buf_ring =
+			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+		if (rxr->rx_buf_ring == NULL)
+			return -ENOMEM;
+
+		for (j = 0; j < bp->rx_max_ring; j++) {
+			rxr->rx_desc_ring[j] =
+				dma_alloc_coherent(&bp->pdev->dev,
+						   RXBD_RING_SIZE,
+						   &rxr->rx_desc_mapping[j],
+						   GFP_KERNEL);
+			if (rxr->rx_desc_ring[j] == NULL)
+				return -ENOMEM;
+
+		}
+
+		if (bp->rx_pg_ring_size) {
+			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
+						  bp->rx_max_pg_ring);
+			if (rxr->rx_pg_ring == NULL)
+				return -ENOMEM;
+
+		}
+
+		for (j = 0; j < bp->rx_max_pg_ring; j++) {
+			rxr->rx_pg_desc_ring[j] =
+				dma_alloc_coherent(&bp->pdev->dev,
+						   RXBD_RING_SIZE,
+						   &rxr->rx_pg_desc_mapping[j],
+						   GFP_KERNEL);
+			if (rxr->rx_pg_desc_ring[j] == NULL)
+				return -ENOMEM;
+
+		}
+	}
+	return 0;
+}
+
+static void
+bnx2_free_mem(struct bnx2 *bp)
+{
+	int i;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+	bnx2_free_tx_mem(bp);
+	bnx2_free_rx_mem(bp);
+
+	for (i = 0; i < bp->ctx_pages; i++) {
+		if (bp->ctx_blk[i]) {
+			dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
+					  bp->ctx_blk[i],
+					  bp->ctx_blk_mapping[i]);
+			bp->ctx_blk[i] = NULL;
+		}
+	}
+	if (bnapi->status_blk.msi) {
+		dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+				  bnapi->status_blk.msi,
+				  bp->status_blk_mapping);
+		bnapi->status_blk.msi = NULL;
+		bp->stats_blk = NULL;
+	}
+}
+
+static int
+bnx2_alloc_mem(struct bnx2 *bp)
+{
+	int i, status_blk_size, err;
+	struct bnx2_napi *bnapi;
+	void *status_blk;
+
+	/* Combine status and statistics blocks into one allocation. */
+	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
+	if (bp->flags & BNX2_FLAG_MSIX_CAP)
+		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
+						 BNX2_SBLK_MSIX_ALIGN_SIZE);
+	bp->status_stats_size = status_blk_size +
+				sizeof(struct statistics_block);
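+	/* resulting layout: status block(s) first (one MSI-X aligned
+	 * block per vector when MSI-X is available), with the statistics
+	 * block placed right after */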
+
+	status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+					&bp->status_blk_mapping, GFP_KERNEL);
+	if (status_blk == NULL)
+		goto alloc_mem_err;
+
+	memset(status_blk, 0, bp->status_stats_size);
+
+	bnapi = &bp->bnx2_napi[0];
+	bnapi->status_blk.msi = status_blk;
+	bnapi->hw_tx_cons_ptr =
+		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
+	bnapi->hw_rx_cons_ptr =
+		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
+	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
+		for (i = 1; i < bp->irq_nvecs; i++) {
+			struct status_block_msix *sblk;
+
+			bnapi = &bp->bnx2_napi[i];
+
+			sblk = (void *) (status_blk +
+					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
+			bnapi->status_blk.msix = sblk;
+			bnapi->hw_tx_cons_ptr =
+				&sblk->status_tx_quick_consumer_index;
+			bnapi->hw_rx_cons_ptr =
+				&sblk->status_rx_quick_consumer_index;
+			bnapi->int_num = i << 24;
+		}
+	}
+
+	bp->stats_blk = status_blk + status_blk_size;
+
+	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
+		if (bp->ctx_pages == 0)
+			bp->ctx_pages = 1;
+		for (i = 0; i < bp->ctx_pages; i++) {
+			bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
+						BCM_PAGE_SIZE,
+						&bp->ctx_blk_mapping[i],
+						GFP_KERNEL);
+			if (bp->ctx_blk[i] == NULL)
+				goto alloc_mem_err;
+		}
+	}
+
+	err = bnx2_alloc_rx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
+	err = bnx2_alloc_tx_mem(bp);
+	if (err)
+		goto alloc_mem_err;
+
+	return 0;
+
+alloc_mem_err:
+	bnx2_free_mem(bp);
+	return -ENOMEM;
+}
+
+static void
+bnx2_report_fw_link(struct bnx2 *bp)
+{
+	u32 fw_link_status = 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return;
+
+	if (bp->link_up) {
+		u32 bmsr;
+
+		switch (bp->line_speed) {
+		case SPEED_10:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_10HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_10FULL;
+			break;
+		case SPEED_100:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_100HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_100FULL;
+			break;
+		case SPEED_1000:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_1000HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_1000FULL;
+			break;
+		case SPEED_2500:
+			if (bp->duplex == DUPLEX_HALF)
+				fw_link_status = BNX2_LINK_STATUS_2500HALF;
+			else
+				fw_link_status = BNX2_LINK_STATUS_2500FULL;
+			break;
+		}
+
+		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
+
+		if (bp->autoneg) {
+			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
+
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+
+			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
+			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
+				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
+			else
+				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
+		}
+	} else {
+		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
+	}
+
+	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
+}
+
+static char *
+bnx2_xceiver_str(struct bnx2 *bp)
+{
+	return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
+		((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
+		 "Copper");
+}
+
+static void
+bnx2_report_link(struct bnx2 *bp)
+{
+	if (bp->link_up) {
+		netif_carrier_on(bp->dev);
+		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
+			    bnx2_xceiver_str(bp),
+			    bp->line_speed,
+			    bp->duplex == DUPLEX_FULL ? "full" : "half");
+
+		if (bp->flow_ctrl) {
+			if (bp->flow_ctrl & FLOW_CTRL_RX) {
+				pr_cont(", receive ");
+				if (bp->flow_ctrl & FLOW_CTRL_TX)
+					pr_cont("& transmit ");
+			}
+			else {
+				pr_cont(", transmit ");
+			}
+			pr_cont("flow control ON");
+		}
+		pr_cont("\n");
+	} else {
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC %s Link is Down\n",
+			   bnx2_xceiver_str(bp));
+	}
+
+	bnx2_report_fw_link(bp);
+}
+
+static void
+bnx2_resolve_flow_ctrl(struct bnx2 *bp)
+{
+	u32 local_adv, remote_adv;
+
+	bp->flow_ctrl = 0;
+	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+
+		if (bp->duplex == DUPLEX_FULL) {
+			bp->flow_ctrl = bp->req_flow_ctrl;
+		}
+		return;
+	}
+
+	if (bp->duplex != DUPLEX_FULL) {
+		return;
+	}
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+		u32 val;
+
+		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
+		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
+			bp->flow_ctrl |= FLOW_CTRL_TX;
+		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
+			bp->flow_ctrl |= FLOW_CTRL_RX;
+		return;
+	}
+
+	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		u32 new_local_adv = 0;
+		u32 new_remote_adv = 0;
+
+		if (local_adv & ADVERTISE_1000XPAUSE)
+			new_local_adv |= ADVERTISE_PAUSE_CAP;
+		if (local_adv & ADVERTISE_1000XPSE_ASYM)
+			new_local_adv |= ADVERTISE_PAUSE_ASYM;
+		if (remote_adv & ADVERTISE_1000XPAUSE)
+			new_remote_adv |= ADVERTISE_PAUSE_CAP;
+		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
+			new_remote_adv |= ADVERTISE_PAUSE_ASYM;
+
+		local_adv = new_local_adv;
+		remote_adv = new_remote_adv;
+	}
+
+	/* See Table 28B-3 of 802.3ab-1999 spec. */
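+	/* Pause resolution: symmetric pause needs PAUSE_CAP advertised
+	 * by both ends; if we advertise PAUSE_CAP+ASYM and the partner
+	 * only advertises ASYM, we end up with RX-only pause, and the
+	 * mirrored case gives TX-only pause.
+	 */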
+	if (local_adv & ADVERTISE_PAUSE_CAP) {
+		if (local_adv & ADVERTISE_PAUSE_ASYM) {
+			if (remote_adv & ADVERTISE_PAUSE_CAP) {
+				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+			}
+			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
+				bp->flow_ctrl = FLOW_CTRL_RX;
+			}
+		}
+		else {
+			if (remote_adv & ADVERTISE_PAUSE_CAP) {
+				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+			}
+		}
+	}
+	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
+		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
+			(remote_adv & ADVERTISE_PAUSE_ASYM)) {
+
+			bp->flow_ctrl = FLOW_CTRL_TX;
+		}
+	}
+}
+
+static int
+bnx2_5709s_linkup(struct bnx2 *bp)
+{
+	u32 val, speed;
+
+	bp->link_up = 1;
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
+	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
+		bp->line_speed = bp->req_line_speed;
+		bp->duplex = bp->req_duplex;
+		return 0;
+	}
+	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
+	switch (speed) {
+		case MII_BNX2_GP_TOP_AN_SPEED_10:
+			bp->line_speed = SPEED_10;
+			break;
+		case MII_BNX2_GP_TOP_AN_SPEED_100:
+			bp->line_speed = SPEED_100;
+			break;
+		case MII_BNX2_GP_TOP_AN_SPEED_1G:
+		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
+			bp->line_speed = SPEED_1000;
+			break;
+		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
+			bp->line_speed = SPEED_2500;
+			break;
+	}
+	if (val & MII_BNX2_GP_TOP_AN_FD)
+		bp->duplex = DUPLEX_FULL;
+	else
+		bp->duplex = DUPLEX_HALF;
+	return 0;
+}
+
+static int
+bnx2_5708s_linkup(struct bnx2 *bp)
+{
+	u32 val;
+
+	bp->link_up = 1;
+	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
+	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
+		case BCM5708S_1000X_STAT1_SPEED_10:
+			bp->line_speed = SPEED_10;
+			break;
+		case BCM5708S_1000X_STAT1_SPEED_100:
+			bp->line_speed = SPEED_100;
+			break;
+		case BCM5708S_1000X_STAT1_SPEED_1G:
+			bp->line_speed = SPEED_1000;
+			break;
+		case BCM5708S_1000X_STAT1_SPEED_2G5:
+			bp->line_speed = SPEED_2500;
+			break;
+	}
+	if (val & BCM5708S_1000X_STAT1_FD)
+		bp->duplex = DUPLEX_FULL;
+	else
+		bp->duplex = DUPLEX_HALF;
+
+	return 0;
+}
+
+static int
+bnx2_5706s_linkup(struct bnx2 *bp)
+{
+	u32 bmcr, local_adv, remote_adv, common;
+
+	bp->link_up = 1;
+	bp->line_speed = SPEED_1000;
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+	if (bmcr & BMCR_FULLDPLX) {
+		bp->duplex = DUPLEX_FULL;
+	}
+	else {
+		bp->duplex = DUPLEX_HALF;
+	}
+
+	if (!(bmcr & BMCR_ANENABLE)) {
+		return 0;
+	}
+
+	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+	common = local_adv & remote_adv;
+	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
+
+		if (common & ADVERTISE_1000XFULL) {
+			bp->duplex = DUPLEX_FULL;
+		}
+		else {
+			bp->duplex = DUPLEX_HALF;
+		}
+	}
+
+	return 0;
+}
+
+static int
+bnx2_copper_linkup(struct bnx2 *bp)
+{
+	u32 bmcr;
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+	if (bmcr & BMCR_ANENABLE) {
+		u32 local_adv, remote_adv, common;
+
+		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
+		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
+
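+		/* The link-partner bits in MII_STAT1000 sit two bit
+		 * positions above the matching MII_CTRL1000
+		 * advertisement bits, hence the shift.
+		 */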
+		common = local_adv & (remote_adv >> 2);
+		if (common & ADVERTISE_1000FULL) {
+			bp->line_speed = SPEED_1000;
+			bp->duplex = DUPLEX_FULL;
+		}
+		else if (common & ADVERTISE_1000HALF) {
+			bp->line_speed = SPEED_1000;
+			bp->duplex = DUPLEX_HALF;
+		}
+		else {
+			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
+			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
+
+			common = local_adv & remote_adv;
+			if (common & ADVERTISE_100FULL) {
+				bp->line_speed = SPEED_100;
+				bp->duplex = DUPLEX_FULL;
+			}
+			else if (common & ADVERTISE_100HALF) {
+				bp->line_speed = SPEED_100;
+				bp->duplex = DUPLEX_HALF;
+			}
+			else if (common & ADVERTISE_10FULL) {
+				bp->line_speed = SPEED_10;
+				bp->duplex = DUPLEX_FULL;
+			}
+			else if (common & ADVERTISE_10HALF) {
+				bp->line_speed = SPEED_10;
+				bp->duplex = DUPLEX_HALF;
+			}
+			else {
+				bp->line_speed = 0;
+				bp->link_up = 0;
+			}
+		}
+	}
+	else {
+		if (bmcr & BMCR_SPEED100) {
+			bp->line_speed = SPEED_100;
+		}
+		else {
+			bp->line_speed = SPEED_10;
+		}
+		if (bmcr & BMCR_FULLDPLX) {
+			bp->duplex = DUPLEX_FULL;
+		}
+		else {
+			bp->duplex = DUPLEX_HALF;
+		}
+	}
+
+	return 0;
+}
+
+static void
+bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
+{
+	u32 val, rx_cid_addr = GET_CID_ADDR(cid);
+
+	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
+	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
+	val |= 0x02 << 8;
+
+	if (bp->flow_ctrl & FLOW_CTRL_TX)
+		val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
+
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+}
+
+static void
+bnx2_init_all_rx_contexts(struct bnx2 *bp)
+{
+	int i;
+	u32 cid;
+
+	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
+		if (i == 1)
+			cid = RX_RSS_CID;
+		bnx2_init_rx_context(bp, cid);
+	}
+}
+
+static void
+bnx2_set_mac_link(struct bnx2 *bp)
+{
+	u32 val;
+
+	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
+	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
+		(bp->duplex == DUPLEX_HALF)) {
+		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
+	}
+
+	/* Configure the EMAC mode register. */
+	val = REG_RD(bp, BNX2_EMAC_MODE);
+
+	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
+		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
+		BNX2_EMAC_MODE_25G_MODE);
+
+	if (bp->link_up) {
+		switch (bp->line_speed) {
+			case SPEED_10:
+				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
+					val |= BNX2_EMAC_MODE_PORT_MII_10M;
+					break;
+				}
+				/* fall through */
+			case SPEED_100:
+				val |= BNX2_EMAC_MODE_PORT_MII;
+				break;
+			case SPEED_2500:
+				val |= BNX2_EMAC_MODE_25G_MODE;
+				/* fall through */
+			case SPEED_1000:
+				val |= BNX2_EMAC_MODE_PORT_GMII;
+				break;
+		}
+	}
+	else {
+		val |= BNX2_EMAC_MODE_PORT_GMII;
+	}
+
+	/* Set the MAC to operate in the appropriate duplex mode. */
+	if (bp->duplex == DUPLEX_HALF)
+		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
+	REG_WR(bp, BNX2_EMAC_MODE, val);
+
+	/* Enable/disable rx PAUSE. */
+	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
+
+	if (bp->flow_ctrl & FLOW_CTRL_RX)
+		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
+	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
+
+	/* Enable/disable tx PAUSE. */
+	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
+	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
+
+	if (bp->flow_ctrl & FLOW_CTRL_TX)
+		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
+	REG_WR(bp, BNX2_EMAC_TX_MODE, val);
+
+	/* Acknowledge the interrupt. */
+	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
+
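+	/* The flow control enable bit is also kept in each RX context
+	 * (see bnx2_init_rx_context()), so reprogram the contexts.
+	 */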
+	bnx2_init_all_rx_contexts(bp);
+}
+
+static void
+bnx2_enable_bmsr1(struct bnx2 *bp)
+{
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (CHIP_NUM(bp) == CHIP_NUM_5709))
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_GP_STATUS);
+}
+
+static void
+bnx2_disable_bmsr1(struct bnx2 *bp)
+{
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (CHIP_NUM(bp) == CHIP_NUM_5709))
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+}
+
+static int
+bnx2_test_and_enable_2g5(struct bnx2 *bp)
+{
+	u32 up1;
+	int ret = 1;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return 0;
+
+	if (bp->autoneg & AUTONEG_SPEED)
+		bp->advertising |= ADVERTISED_2500baseX_Full;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+
+	bnx2_read_phy(bp, bp->mii_up1, &up1);
+	if (!(up1 & BCM5708S_UP1_2G5)) {
+		up1 |= BCM5708S_UP1_2G5;
+		bnx2_write_phy(bp, bp->mii_up1, up1);
+		ret = 0;
+	}
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	return ret;
+}
+
+static int
+bnx2_test_and_disable_2g5(struct bnx2 *bp)
+{
+	u32 up1;
+	int ret = 0;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return 0;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+
+	bnx2_read_phy(bp, bp->mii_up1, &up1);
+	if (up1 & BCM5708S_UP1_2G5) {
+		up1 &= ~BCM5708S_UP1_2G5;
+		bnx2_write_phy(bp, bp->mii_up1, up1);
+		ret = 1;
+	}
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	return ret;
+}
+
+static void
+bnx2_enable_forced_2g5(struct bnx2 *bp)
+{
+	u32 uninitialized_var(bmcr);
+	int err;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		u32 val;
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_SERDES_DIG);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
+			val |= MII_BNX2_SD_MISC1_FORCE |
+				MII_BNX2_SD_MISC1_FORCE_2_5G;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr |= BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
+	}
+
+	if (err)
+		return;
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		bmcr &= ~BMCR_ANENABLE;
+		if (bp->req_duplex == DUPLEX_FULL)
+			bmcr |= BMCR_FULLDPLX;
+	}
+	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+}
+
+static void
+bnx2_disable_forced_2g5(struct bnx2 *bp)
+{
+	u32 uninitialized_var(bmcr);
+	int err;
+
+	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+		return;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		u32 val;
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_SERDES_DIG);
+		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
+			val &= ~MII_BNX2_SD_MISC1_FORCE;
+			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
+		}
+
+		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
+			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (!err)
+			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
+	} else {
+		return;
+	}
+
+	if (err)
+		return;
+
+	if (bp->autoneg & AUTONEG_SPEED)
+		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
+	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+}
+
+static void
+bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
+{
+	u32 val;
+
+	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
+	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
+	if (start)
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
+	else
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
+}
+
+static int
+bnx2_set_link(struct bnx2 *bp)
+{
+	u32 bmsr;
+	u8 link_up;
+
+	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
+		bp->link_up = 1;
+		return 0;
+	}
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return 0;
+
+	link_up = bp->link_up;
+
+	bnx2_enable_bmsr1(bp);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_disable_bmsr1(bp);
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
+		u32 val, an_dbg;
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
+			bnx2_5706s_force_link_dn(bp, 0);
+			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
+		}
+		val = REG_RD(bp, BNX2_EMAC_STATUS);
+
+		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+
+		if ((val & BNX2_EMAC_STATUS_LINK) &&
+		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
+			bmsr |= BMSR_LSTATUS;
+		else
+			bmsr &= ~BMSR_LSTATUS;
+	}
+
+	if (bmsr & BMSR_LSTATUS) {
+		bp->link_up = 1;
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			if (CHIP_NUM(bp) == CHIP_NUM_5706)
+				bnx2_5706s_linkup(bp);
+			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+				bnx2_5708s_linkup(bp);
+			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
+				bnx2_5709s_linkup(bp);
+		}
+		else {
+			bnx2_copper_linkup(bp);
+		}
+		bnx2_resolve_flow_ctrl(bp);
+	}
+	else {
+		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+		    (bp->autoneg & AUTONEG_SPEED))
+			bnx2_disable_forced_2g5(bp);
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
+			u32 bmcr;
+
+			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+			bmcr |= BMCR_ANENABLE;
+			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+
+			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+		}
+		bp->link_up = 0;
+	}
+
+	if (bp->link_up != link_up) {
+		bnx2_report_link(bp);
+	}
+
+	bnx2_set_mac_link(bp);
+
+	return 0;
+}
+
+static int
+bnx2_reset_phy(struct bnx2 *bp)
+{
+	int i;
+	u32 reg;
+
+	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
+
+#define PHY_RESET_MAX_WAIT 100
+	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
+		udelay(10);
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
+		if (!(reg & BMCR_RESET)) {
+			udelay(20);
+			break;
+		}
+	}
+	if (i == PHY_RESET_MAX_WAIT) {
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static u32
+bnx2_phy_get_pause_adv(struct bnx2 *bp)
+{
+	u32 adv = 0;
+
+	if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
+		(FLOW_CTRL_RX | FLOW_CTRL_TX)) {
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			adv = ADVERTISE_1000XPAUSE;
+		}
+		else {
+			adv = ADVERTISE_PAUSE_CAP;
+		}
+	}
+	else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			adv = ADVERTISE_1000XPSE_ASYM;
+		}
+		else {
+			adv = ADVERTISE_PAUSE_ASYM;
+		}
+	}
+	else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+			adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+		}
+		else {
+			adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+		}
+	}
+	return adv;
+}
+
+static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
+
+static int
+bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 speed_arg = 0, pause_adv;
+
+	pause_adv = bnx2_phy_get_pause_adv(bp);
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
+		if (bp->advertising & ADVERTISED_10baseT_Half)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+		if (bp->advertising & ADVERTISED_10baseT_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+		if (bp->advertising & ADVERTISED_100baseT_Half)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+		if (bp->advertising & ADVERTISED_100baseT_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+		if (bp->advertising & ADVERTISED_1000baseT_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+		if (bp->advertising & ADVERTISED_2500baseX_Full)
+			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+	} else {
+		if (bp->req_line_speed == SPEED_2500)
+			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
+		else if (bp->req_line_speed == SPEED_1000)
+			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
+		else if (bp->req_line_speed == SPEED_100) {
+			if (bp->req_duplex == DUPLEX_FULL)
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
+			else
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
+		} else if (bp->req_line_speed == SPEED_10) {
+			if (bp->req_duplex == DUPLEX_FULL)
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
+			else
+				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
+		}
+	}
+
+	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
+		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
+	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
+		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
+
+	if (port == PORT_TP)
+		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
+			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
+
+	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
+
+	spin_unlock_bh(&bp->phy_lock);
+	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
+	spin_lock_bh(&bp->phy_lock);
+
+	return 0;
+}
+
+static int
+bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 adv, bmcr;
+	u32 new_adv = 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return bnx2_setup_remote_phy(bp, port);
+
+	if (!(bp->autoneg & AUTONEG_SPEED)) {
+		u32 new_bmcr;
+		int force_link_down = 0;
+
+		if (bp->req_line_speed == SPEED_2500) {
+			if (!bnx2_test_and_enable_2g5(bp))
+				force_link_down = 1;
+		} else if (bp->req_line_speed == SPEED_1000) {
+			if (bnx2_test_and_disable_2g5(bp))
+				force_link_down = 1;
+		}
+		bnx2_read_phy(bp, bp->mii_adv, &adv);
+		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		new_bmcr = bmcr & ~BMCR_ANENABLE;
+		new_bmcr |= BMCR_SPEED1000;
+
+		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+			if (bp->req_line_speed == SPEED_2500)
+				bnx2_enable_forced_2g5(bp);
+			else if (bp->req_line_speed == SPEED_1000) {
+				bnx2_disable_forced_2g5(bp);
+				new_bmcr &= ~0x2000;
+			}
+
+		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
+			if (bp->req_line_speed == SPEED_2500)
+				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
+			else
+				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
+		}
+
+		if (bp->req_duplex == DUPLEX_FULL) {
+			adv |= ADVERTISE_1000XFULL;
+			new_bmcr |= BMCR_FULLDPLX;
+		}
+		else {
+			adv |= ADVERTISE_1000XHALF;
+			new_bmcr &= ~BMCR_FULLDPLX;
+		}
+		if ((new_bmcr != bmcr) || (force_link_down)) {
+			/* Force a link down visible on the other side */
+			if (bp->link_up) {
+				bnx2_write_phy(bp, bp->mii_adv, adv &
+					       ~(ADVERTISE_1000XFULL |
+						 ADVERTISE_1000XHALF));
+				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
+					BMCR_ANRESTART | BMCR_ANENABLE);
+
+				bp->link_up = 0;
+				netif_carrier_off(bp->dev);
+				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+				bnx2_report_link(bp);
+			}
+			bnx2_write_phy(bp, bp->mii_adv, adv);
+			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+		} else {
+			bnx2_resolve_flow_ctrl(bp);
+			bnx2_set_mac_link(bp);
+		}
+		return 0;
+	}
+
+	bnx2_test_and_enable_2g5(bp);
+
+	if (bp->advertising & ADVERTISED_1000baseT_Full)
+		new_adv |= ADVERTISE_1000XFULL;
+
+	new_adv |= bnx2_phy_get_pause_adv(bp);
+
+	bnx2_read_phy(bp, bp->mii_adv, &adv);
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	bp->serdes_an_pending = 0;
+	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
+		/* Force a link down visible on the other side */
+		if (bp->link_up) {
+			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+			spin_unlock_bh(&bp->phy_lock);
+			msleep(20);
+			spin_lock_bh(&bp->phy_lock);
+		}
+
+		bnx2_write_phy(bp, bp->mii_adv, new_adv);
+		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
+			BMCR_ANENABLE);
+		/* Speed up link-up time when the link partner
+		 * does not autonegotiate, which is very common
+		 * in blade servers.  Some blade servers use
+		 * IPMI for keyboard input and it's important
+		 * to minimize link disruptions.  Autonegotiation
+		 * involves exchanging base pages plus 3 next pages
+		 * and normally completes in about 120 msec.
+		 */
+		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
+		bp->serdes_an_pending = 1;
+		mod_timer(&bp->timer, jiffies + bp->current_interval);
+	} else {
+		bnx2_resolve_flow_ctrl(bp);
+		bnx2_set_mac_link(bp);
+	}
+
+	return 0;
+}
+
+#define ETHTOOL_ALL_FIBRE_SPEED						\
+	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
+		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
+		(ADVERTISED_1000baseT_Full)
+
+#define ETHTOOL_ALL_COPPER_SPEED					\
+	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
+	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
+	ADVERTISED_1000baseT_Full)
+
+#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
+	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
+
+#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
+
+static void
+bnx2_set_default_remote_link(struct bnx2 *bp)
+{
+	u32 link;
+
+	if (bp->phy_port == PORT_TP)
+		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
+	else
+		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
+
+	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
+		bp->req_line_speed = 0;
+		bp->autoneg |= AUTONEG_SPEED;
+		bp->advertising = ADVERTISED_Autoneg;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+			bp->advertising |= ADVERTISED_10baseT_Half;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+			bp->advertising |= ADVERTISED_10baseT_Full;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+			bp->advertising |= ADVERTISED_100baseT_Half;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+			bp->advertising |= ADVERTISED_100baseT_Full;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+			bp->advertising |= ADVERTISED_1000baseT_Full;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+			bp->advertising |= ADVERTISED_2500baseX_Full;
+	} else {
+		bp->autoneg = 0;
+		bp->advertising = 0;
+		bp->req_duplex = DUPLEX_FULL;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
+			bp->req_line_speed = SPEED_10;
+			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
+				bp->req_duplex = DUPLEX_HALF;
+		}
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
+			bp->req_line_speed = SPEED_100;
+			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
+				bp->req_duplex = DUPLEX_HALF;
+		}
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
+			bp->req_line_speed = SPEED_1000;
+		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
+			bp->req_line_speed = SPEED_2500;
+	}
+}
+
+static void
+bnx2_set_default_link(struct bnx2 *bp)
+{
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		bnx2_set_default_remote_link(bp);
+		return;
+	}
+
+	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
+	bp->req_line_speed = 0;
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		u32 reg;
+
+		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
+
+		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
+		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
+		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
+			bp->autoneg = 0;
+			bp->req_line_speed = bp->line_speed = SPEED_1000;
+			bp->req_duplex = DUPLEX_FULL;
+		}
+	} else
+		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
+}
+
+static void
+bnx2_send_heart_beat(struct bnx2 *bp)
+{
+	u32 msg;
+	u32 addr;
+
+	spin_lock(&bp->indirect_lock);
+	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
+	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
+	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
+	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
+	spin_unlock(&bp->indirect_lock);
+}
+
+static void
+bnx2_remote_phy_event(struct bnx2 *bp)
+{
+	u32 msg;
+	u8 link_up = bp->link_up;
+	u8 old_port;
+
+	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+
+	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
+		bnx2_send_heart_beat(bp);
+
+	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
+
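+	/* BNX2_LINK_STATUS_LINK_DOWN is the zero status value, so this
+	 * tests whether the LINK_UP bit is clear.
+	 */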
+	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
+		bp->link_up = 0;
+	else {
+		u32 speed;
+
+		bp->link_up = 1;
+		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
+		bp->duplex = DUPLEX_FULL;
+		switch (speed) {
+			case BNX2_LINK_STATUS_10HALF:
+				bp->duplex = DUPLEX_HALF;
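+				/* fall through */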
+			case BNX2_LINK_STATUS_10FULL:
+				bp->line_speed = SPEED_10;
+				break;
+			case BNX2_LINK_STATUS_100HALF:
+				bp->duplex = DUPLEX_HALF;
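+				/* fall through */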
+			case BNX2_LINK_STATUS_100BASE_T4:
+			case BNX2_LINK_STATUS_100FULL:
+				bp->line_speed = SPEED_100;
+				break;
+			case BNX2_LINK_STATUS_1000HALF:
+				bp->duplex = DUPLEX_HALF;
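+				/* fall through */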
+			case BNX2_LINK_STATUS_1000FULL:
+				bp->line_speed = SPEED_1000;
+				break;
+			case BNX2_LINK_STATUS_2500HALF:
+				bp->duplex = DUPLEX_HALF;
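+				/* fall through */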
+			case BNX2_LINK_STATUS_2500FULL:
+				bp->line_speed = SPEED_2500;
+				break;
+			default:
+				bp->line_speed = 0;
+				break;
+		}
+
+		bp->flow_ctrl = 0;
+		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
+		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
+			if (bp->duplex == DUPLEX_FULL)
+				bp->flow_ctrl = bp->req_flow_ctrl;
+		} else {
+			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
+				bp->flow_ctrl |= FLOW_CTRL_TX;
+			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
+				bp->flow_ctrl |= FLOW_CTRL_RX;
+		}
+
+		old_port = bp->phy_port;
+		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
+			bp->phy_port = PORT_FIBRE;
+		else
+			bp->phy_port = PORT_TP;
+
+		if (old_port != bp->phy_port)
+			bnx2_set_default_link(bp);
+
+	}
+	if (bp->link_up != link_up)
+		bnx2_report_link(bp);
+
+	bnx2_set_mac_link(bp);
+}
+
+static int
+bnx2_set_remote_link(struct bnx2 *bp)
+{
+	u32 evt_code;
+
+	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
+	switch (evt_code) {
+		case BNX2_FW_EVT_CODE_LINK_EVENT:
+			bnx2_remote_phy_event(bp);
+			break;
+		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
+		default:
+			bnx2_send_heart_beat(bp);
+			break;
+	}
+	return 0;
+}
+
+static int
+bnx2_setup_copper_phy(struct bnx2 *bp)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 bmcr;
+	u32 new_bmcr;
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		u32 adv_reg, adv1000_reg;
+		u32 new_adv_reg = 0;
+		u32 new_adv1000_reg = 0;
+
+		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
+		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
+			ADVERTISE_PAUSE_ASYM);
+
+		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
+		adv1000_reg &= PHY_ALL_1000_SPEED;
+
+		if (bp->advertising & ADVERTISED_10baseT_Half)
+			new_adv_reg |= ADVERTISE_10HALF;
+		if (bp->advertising & ADVERTISED_10baseT_Full)
+			new_adv_reg |= ADVERTISE_10FULL;
+		if (bp->advertising & ADVERTISED_100baseT_Half)
+			new_adv_reg |= ADVERTISE_100HALF;
+		if (bp->advertising & ADVERTISED_100baseT_Full)
+			new_adv_reg |= ADVERTISE_100FULL;
+		if (bp->advertising & ADVERTISED_1000baseT_Full)
+			new_adv1000_reg |= ADVERTISE_1000FULL;
+
+		new_adv_reg |= ADVERTISE_CSMA;
+
+		new_adv_reg |= bnx2_phy_get_pause_adv(bp);
+
+		if ((adv1000_reg != new_adv1000_reg) ||
+			(adv_reg != new_adv_reg) ||
+			((bmcr & BMCR_ANENABLE) == 0)) {
+
+			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
+			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
+				BMCR_ANENABLE);
+		}
+		else if (bp->link_up) {
+			/* Flow ctrl may have changed from auto to forced
+			 * or vice-versa.
+			 */
+
+			bnx2_resolve_flow_ctrl(bp);
+			bnx2_set_mac_link(bp);
+		}
+		return 0;
+	}
+
+	new_bmcr = 0;
+	if (bp->req_line_speed == SPEED_100) {
+		new_bmcr |= BMCR_SPEED100;
+	}
+	if (bp->req_duplex == DUPLEX_FULL) {
+		new_bmcr |= BMCR_FULLDPLX;
+	}
+	if (new_bmcr != bmcr) {
+		u32 bmsr;
+
+		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+
+		if (bmsr & BMSR_LSTATUS) {
+			/* Force link down */
+			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+			spin_unlock_bh(&bp->phy_lock);
+			msleep(50);
+			spin_lock_bh(&bp->phy_lock);
+
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
+		}
+
+		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
+
+		/* Normally, the new speed is set up after the link has
+		 * gone down and up again. In some cases, link will not go
+		 * down so we need to set up the new speed here.
+		 */
+		if (bmsr & BMSR_LSTATUS) {
+			bp->line_speed = bp->req_line_speed;
+			bp->duplex = bp->req_duplex;
+			bnx2_resolve_flow_ctrl(bp);
+			bnx2_set_mac_link(bp);
+		}
+	} else {
+		bnx2_resolve_flow_ctrl(bp);
+		bnx2_set_mac_link(bp);
+	}
+	return 0;
+}
+
+static int
+bnx2_setup_phy(struct bnx2 *bp, u8 port)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	if (bp->loopback == MAC_LOOPBACK)
+		return 0;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		return bnx2_setup_serdes_phy(bp, port);
+	}
+	else {
+		return bnx2_setup_copper_phy(bp);
+	}
+}
+
+static int
+bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
+{
+	u32 val;
+
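+	/* The 5709S exposes the standard IEEE MII registers at an
+	 * offset of 0x10 within the currently selected register block.
+	 */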
+	bp->mii_bmcr = MII_BMCR + 0x10;
+	bp->mii_bmsr = MII_BMSR + 0x10;
+	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
+	bp->mii_adv = MII_ADVERTISE + 0x10;
+	bp->mii_lpa = MII_LPA + 0x10;
+	bp->mii_up1 = MII_BNX2_OVER1G_UP1;
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
+	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
+
+	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
+	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
+	val |= MII_BNX2_SD_1000XCTL1_FIBER;
+	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
+	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
+	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
+		val |= BCM5708S_UP1_2G5;
+	else
+		val &= ~BCM5708S_UP1_2G5;
+	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
+	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
+	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
+	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
+
+	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
+	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
+	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
+
+	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
+
+	return 0;
+}
+
+static int
+bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
+{
+	u32 val;
+
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	bp->mii_up1 = BCM5708S_UP1;
+
+	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
+	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
+	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
+
+	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
+	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
+	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
+
+	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
+	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
+	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
+		bnx2_read_phy(bp, BCM5708S_UP1, &val);
+		val |= BCM5708S_UP1_2G5;
+		bnx2_write_phy(bp, BCM5708S_UP1, val);
+	}
+
+	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
+	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
+	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
+		/* increase tx signal amplitude */
+		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+			       BCM5708S_BLK_ADDR_TX_MISC);
+		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
+		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
+		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
+		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
+	}
+
+	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
+	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
+
+	if (val) {
+		u32 is_backplane;
+
+		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
+		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
+			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+				       BCM5708S_BLK_ADDR_TX_MISC);
+			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
+			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
+				       BCM5708S_BLK_ADDR_DIG);
+		}
+	}
+	return 0;
+}
+
+static int
+bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
+{
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5706)
+		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
+
+	if (bp->dev->mtu > 1500) {
+		u32 val;
+
+		/* Set extended packet length bit */
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
+
+		bnx2_write_phy(bp, 0x1c, 0x6c00);
+		bnx2_read_phy(bp, 0x1c, &val);
+		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
+	}
+	else {
+		u32 val;
+
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, val & ~0x4007);
+
+		bnx2_write_phy(bp, 0x1c, 0x6c00);
+		bnx2_read_phy(bp, 0x1c, &val);
+		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
+	}
+
+	return 0;
+}
+
+static int
+bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
+{
+	u32 val;
+
+	if (reset_phy)
+		bnx2_reset_phy(bp);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
+		bnx2_write_phy(bp, 0x18, 0x0c00);
+		bnx2_write_phy(bp, 0x17, 0x000a);
+		bnx2_write_phy(bp, 0x15, 0x310b);
+		bnx2_write_phy(bp, 0x17, 0x201f);
+		bnx2_write_phy(bp, 0x15, 0x9506);
+		bnx2_write_phy(bp, 0x17, 0x401f);
+		bnx2_write_phy(bp, 0x15, 0x14e2);
+		bnx2_write_phy(bp, 0x18, 0x0400);
+	}
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
+		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
+			       MII_BNX2_DSP_EXPAND_REG | 0x8);
+		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
+		val &= ~(1 << 8);
+		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
+	}
+
+	if (bp->dev->mtu > 1500) {
+		/* Set extended packet length bit */
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, val | 0x4000);
+
+		bnx2_read_phy(bp, 0x10, &val);
+		bnx2_write_phy(bp, 0x10, val | 0x1);
+	}
+	else {
+		bnx2_write_phy(bp, 0x18, 0x7);
+		bnx2_read_phy(bp, 0x18, &val);
+		bnx2_write_phy(bp, 0x18, val & ~0x4007);
+
+		bnx2_read_phy(bp, 0x10, &val);
+		bnx2_write_phy(bp, 0x10, val & ~0x1);
+	}
+
+	/* ethernet@wirespeed */
+	bnx2_write_phy(bp, 0x18, 0x7007);
+	bnx2_read_phy(bp, 0x18, &val);
+	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
+	return 0;
+}
+
+static int
+bnx2_init_phy(struct bnx2 *bp, int reset_phy)
+__releases(&bp->phy_lock)
+__acquires(&bp->phy_lock)
+{
+	u32 val;
+	int rc = 0;
+
+	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
+	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
+
+	bp->mii_bmcr = MII_BMCR;
+	bp->mii_bmsr = MII_BMSR;
+	bp->mii_bmsr1 = MII_BMSR;
+	bp->mii_adv = MII_ADVERTISE;
+	bp->mii_lpa = MII_LPA;
+
+	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		goto setup_phy;
+
+	bnx2_read_phy(bp, MII_PHYSID1, &val);
+	bp->phy_id = val << 16;
+	bnx2_read_phy(bp, MII_PHYSID2, &val);
+	bp->phy_id |= val & 0xffff;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		if (CHIP_NUM(bp) == CHIP_NUM_5706)
+			rc = bnx2_init_5706s_phy(bp, reset_phy);
+		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
+			rc = bnx2_init_5708s_phy(bp, reset_phy);
+		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
+			rc = bnx2_init_5709s_phy(bp, reset_phy);
+	}
+	else {
+		rc = bnx2_init_copper_phy(bp, reset_phy);
+	}
+
+setup_phy:
+	if (!rc)
+		rc = bnx2_setup_phy(bp, bp->phy_port);
+
+	return rc;
+}
+
+static int
+bnx2_set_mac_loopback(struct bnx2 *bp)
+{
+	u32 mac_mode;
+
+	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+	mac_mode &= ~BNX2_EMAC_MODE_PORT;
+	mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
+	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+	bp->link_up = 1;
+	return 0;
+}
+
+static int bnx2_test_link(struct bnx2 *);
+
+static int
+bnx2_set_phy_loopback(struct bnx2 *bp)
+{
+	u32 mac_mode;
+	int rc, i;
+
+	spin_lock_bh(&bp->phy_lock);
+	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
+			    BMCR_SPEED1000);
+	spin_unlock_bh(&bp->phy_lock);
+	if (rc)
+		return rc;
+
+	for (i = 0; i < 10; i++) {
+		if (bnx2_test_link(bp) == 0)
+			break;
+		msleep(100);
+	}
+
+	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
+	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
+		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
+		      BNX2_EMAC_MODE_25G_MODE);
+
+	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
+	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
+	bp->link_up = 1;
+	return 0;
+}
+
+static void
+bnx2_dump_mcp_state(struct bnx2 *bp)
+{
+	struct net_device *dev = bp->dev;
+	u32 mcp_p0, mcp_p1;
+
+	netdev_err(dev, "<--- start MCP states dump --->\n");
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		mcp_p0 = BNX2_MCP_STATE_P0;
+		mcp_p1 = BNX2_MCP_STATE_P1;
+	} else {
+		mcp_p0 = BNX2_MCP_STATE_P0_5708;
+		mcp_p1 = BNX2_MCP_STATE_P1_5708;
+	}
+	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+		   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
+	netdev_err(dev, "DEBUG: MCP mode[%08x] state[%08x] evt_mask[%08x]\n",
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
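+	/* The program counter is read twice; differing values suggest
+	 * the MCP is still executing.
+	 */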
+	netdev_err(dev, "DEBUG: pc[%08x] pc[%08x] instr[%08x]\n",
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
+		   bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
+	netdev_err(dev, "DEBUG: shmem states:\n");
+	netdev_err(dev, "DEBUG: drv_mb[%08x] fw_mb[%08x] link_status[%08x]",
+		   bnx2_shmem_rd(bp, BNX2_DRV_MB),
+		   bnx2_shmem_rd(bp, BNX2_FW_MB),
+		   bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
+	pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
+	netdev_err(dev, "DEBUG: dev_info_signature[%08x] reset_type[%08x]",
+		   bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
+		   bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
+	pr_cont(" condition[%08x]\n",
+		bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
+	DP_SHMEM_LINE(bp, 0x3cc);
+	DP_SHMEM_LINE(bp, 0x3dc);
+	DP_SHMEM_LINE(bp, 0x3ec);
+	netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
+	netdev_err(dev, "<--- end MCP states dump --->\n");
+}
+
+static int
+bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
+{
+	int i;
+	u32 val;
+
+	bp->fw_wr_seq++;
+	msg_data |= bp->fw_wr_seq;
+
+	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+
+	if (!ack)
+		return 0;
+
+	/* wait for an acknowledgement. */
+	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
+		msleep(10);
+
+		val = bnx2_shmem_rd(bp, BNX2_FW_MB);
+
+		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
+			break;
+	}
+	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
+		return 0;
+
+	/* If we timed out, inform the firmware that this is the case. */
+	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
+		msg_data &= ~BNX2_DRV_MSG_CODE;
+		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
+
+		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
+		if (!silent) {
+			pr_err("fw sync timeout, reset code = %x\n", msg_data);
+			bnx2_dump_mcp_state(bp);
+		}
+
+		return -EBUSY;
+	}
+
+	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
+		return -EIO;
+
+	return 0;
+}
+
+static int
+bnx2_init_5709_context(struct bnx2 *bp)
+{
+	int i, ret = 0;
+	u32 val;
+
+	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
+	val |= (BCM_PAGE_BITS - 8) << 16;
+	REG_WR(bp, BNX2_CTX_COMMAND, val);
+	for (i = 0; i < 10; i++) {
+		val = REG_RD(bp, BNX2_CTX_COMMAND);
+		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
+			break;
+		udelay(2);
+	}
+	if (val & BNX2_CTX_COMMAND_MEM_INIT)
+		return -EBUSY;
+
+	for (i = 0; i < bp->ctx_pages; i++) {
+		int j;
+
+		if (bp->ctx_blk[i])
+			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
+		else
+			return -ENOMEM;
+
+		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
+		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
+		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+		       (u64) bp->ctx_blk_mapping[i] >> 32);
+		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
+		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+		for (j = 0; j < 10; j++) {
+
+			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void
+bnx2_init_context(struct bnx2 *bp)
+{
+	u32 vcid;
+
+	vcid = 96;
+	while (vcid) {
+		u32 vcid_addr, pcid_addr, offset;
+		int i;
+
+		vcid--;
+
+		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+			u32 new_vcid;
+
+			vcid_addr = GET_PCID_ADDR(vcid);
+			if (vcid & 0x8) {
+				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
+			}
+			else {
+				new_vcid = vcid;
+			}
+			pcid_addr = GET_PCID_ADDR(new_vcid);
+		}
+		else {
+			vcid_addr = GET_CID_ADDR(vcid);
+			pcid_addr = vcid_addr;
+		}
+
+		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
+			vcid_addr += (i << PHY_CTX_SHIFT);
+			pcid_addr += (i << PHY_CTX_SHIFT);
+
+			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
+			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
+
+			/* Zero out the context. */
+			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
+				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
+		}
+	}
+}
+
+static int
+bnx2_alloc_bad_rbuf(struct bnx2 *bp)
+{
+	u16 *good_mbuf;
+	u32 good_mbuf_cnt;
+	u32 val;
+
+	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
+	if (good_mbuf == NULL) {
+		pr_err("Failed to allocate memory in %s\n", __func__);
+		return -ENOMEM;
+	}
+
+	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
+
+	good_mbuf_cnt = 0;
+
+	/* Allocate a bunch of mbufs and save the good ones in an array. */
+	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
+	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
+		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
+				BNX2_RBUF_COMMAND_ALLOC_REQ);
+
+		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
+
+		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
+
+		/* The addresses with Bit 9 set are bad memory blocks. */
+		if (!(val & (1 << 9))) {
+			good_mbuf[good_mbuf_cnt] = (u16) val;
+			good_mbuf_cnt++;
+		}
+
+		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
+	}
+
+	/* Free the good ones back to the mbuf pool, thus discarding
+	 * all the bad ones. */
+	while (good_mbuf_cnt) {
+		good_mbuf_cnt--;
+
+		val = good_mbuf[good_mbuf_cnt];
+		val = (val << 9) | val | 1;
+
+		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
+	}
+	kfree(good_mbuf);
+	return 0;
+}
+
+static void
+bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
+{
+	u32 val;
+
+	val = (mac_addr[0] << 8) | mac_addr[1];
+
+	REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
+
+	val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		(mac_addr[4] << 8) | mac_addr[5];
+
+	REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
+}
+
+static inline int
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+{
+	dma_addr_t mapping;
+	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+	struct rx_bd *rxbd =
+		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
+	struct page *page = alloc_page(gfp);
+
+	if (!page)
+		return -ENOMEM;
+	mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
+			       PCI_DMA_FROMDEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		__free_page(page);
+		return -EIO;
+	}
+
+	rx_pg->page = page;
+	dma_unmap_addr_set(rx_pg, mapping, mapping);
+	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+	return 0;
+}
+
+static void
+bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+{
+	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
+	struct page *page = rx_pg->page;
+
+	if (!page)
+		return;
+
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
+		       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+	__free_page(page);
+	rx_pg->page = NULL;
+}
+
+static inline int
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+{
+	struct sk_buff *skb;
+	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
+	dma_addr_t mapping;
+	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
+	unsigned long align;
+
+	skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
+	if (skb == NULL) {
+		return -ENOMEM;
+	}
+
+	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
+		skb_reserve(skb, BNX2_RX_ALIGN - align);
+
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+				 PCI_DMA_FROMDEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	rx_buf->skb = skb;
+	rx_buf->desc = (struct l2_fhdr *) skb->data;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
+	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
+
+	return 0;
+}
+
+static int
+bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
+{
+	struct status_block *sblk = bnapi->status_blk.msi;
+	u32 new_link_state, old_link_state;
+	int is_set = 1;
+
+	new_link_state = sblk->status_attn_bits & event;
+	old_link_state = sblk->status_attn_bits_ack & event;
+	if (new_link_state != old_link_state) {
+		if (new_link_state)
+			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
+		else
+			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
+	} else
+		is_set = 0;
+
+	return is_set;
+}
+
+static void
+bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	spin_lock(&bp->phy_lock);
+
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
+		bnx2_set_link(bp);
+	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
+		bnx2_set_remote_link(bp);
+
+	spin_unlock(&bp->phy_lock);
+}
+
+static inline u16
+bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
+{
+	u16 cons;
+
+	/* Tell compiler that status block fields can change. */
+	barrier();
+	cons = *bnapi->hw_tx_cons_ptr;
+	barrier();
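+	/* The last entry in each ring page is a next-page pointer that
+	 * the hardware skips, so skip it in the consumer index too.
+	 */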
+	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
+		cons++;
+	return cons;
+}
+
+static int
+bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+{
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	u16 hw_cons, sw_cons, sw_ring_cons;
+	int tx_pkt = 0, index;
+	struct netdev_queue *txq;
+
+	index = (bnapi - bp->bnx2_napi);
+	txq = netdev_get_tx_queue(bp->dev, index);
+
+	hw_cons = bnx2_get_hw_tx_cons(bnapi);
+	sw_cons = txr->tx_cons;
+
+	while (sw_cons != hw_cons) {
+		struct sw_tx_bd *tx_buf;
+		struct sk_buff *skb;
+		int i, last;
+
+		sw_ring_cons = TX_RING_IDX(sw_cons);
+
+		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
+		skb = tx_buf->skb;
+
+		/* prefetch skb_end_pointer() to speed up skb_shinfo(skb) */
+		prefetch(&skb->end);
+
+		/* partial BD completions possible with TSO packets */
+		if (tx_buf->is_gso) {
+			u16 last_idx, last_ring_idx;
+
+			last_idx = sw_cons + tx_buf->nr_frags + 1;
+			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
+			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
+				last_idx++;
+			}
+			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
+				break;
+			}
+		}
+
+		dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			skb_headlen(skb), PCI_DMA_TODEVICE);
+
+		tx_buf->skb = NULL;
+		last = tx_buf->nr_frags;
+
+		for (i = 0; i < last; i++) {
+			sw_cons = NEXT_TX_BD(sw_cons);
+
+			dma_unmap_page(&bp->pdev->dev,
+				dma_unmap_addr(
+					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
+					mapping),
+				skb_shinfo(skb)->frags[i].size,
+				PCI_DMA_TODEVICE);
+		}
+
+		sw_cons = NEXT_TX_BD(sw_cons);
+
+		dev_kfree_skb(skb);
+		tx_pkt++;
+		if (tx_pkt == budget)
+			break;
+
+		if (hw_cons == sw_cons)
+			hw_cons = bnx2_get_hw_tx_cons(bnapi);
+	}
+
+	txr->hw_tx_cons = hw_cons;
+	txr->tx_cons = sw_cons;
+
+	/* Need to make the tx_cons update visible to bnx2_start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that bnx2_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq)) &&
+		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if ((netif_tx_queue_stopped(txq)) &&
+		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
+	}
+
+	return tx_pkt;
+}
+
+static void
+bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+			struct sk_buff *skb, int count)
+{
+	struct sw_pg *cons_rx_pg, *prod_rx_pg;
+	struct rx_bd *cons_bd, *prod_bd;
+	int i;
+	u16 hw_prod, prod;
+	u16 cons = rxr->rx_pg_cons;
+
+	cons_rx_pg = &rxr->rx_pg_ring[cons];
+
+	/* The caller was unable to allocate a new page to replace the
+	 * last one in the frags array, so we need to recycle that page
+	 * and then free the skb.
+	 */
+	if (skb) {
+		struct page *page;
+		struct skb_shared_info *shinfo;
+
+		shinfo = skb_shinfo(skb);
+		shinfo->nr_frags--;
+		page = shinfo->frags[shinfo->nr_frags].page;
+		shinfo->frags[shinfo->nr_frags].page = NULL;
+
+		cons_rx_pg->page = page;
+		dev_kfree_skb(skb);
+	}
+
+	hw_prod = rxr->rx_pg_prod;
+
+	for (i = 0; i < count; i++) {
+		prod = RX_PG_RING_IDX(hw_prod);
+
+		prod_rx_pg = &rxr->rx_pg_ring[prod];
+		cons_rx_pg = &rxr->rx_pg_ring[cons];
+		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+
+		if (prod != cons) {
+			prod_rx_pg->page = cons_rx_pg->page;
+			cons_rx_pg->page = NULL;
+			dma_unmap_addr_set(prod_rx_pg, mapping,
+				dma_unmap_addr(cons_rx_pg, mapping));
+
+			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+
+		}
+		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
+		hw_prod = NEXT_RX_BD(hw_prod);
+	}
+	rxr->rx_pg_prod = hw_prod;
+	rxr->rx_pg_cons = cons;
+}
+
+static inline void
+bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+		  struct sk_buff *skb, u16 cons, u16 prod)
+{
+	struct sw_bd *cons_rx_buf, *prod_rx_buf;
+	struct rx_bd *cons_bd, *prod_bd;
+
+	cons_rx_buf = &rxr->rx_buf_ring[cons];
+	prod_rx_buf = &rxr->rx_buf_ring[prod];
+
+	dma_sync_single_for_device(&bp->pdev->dev,
+		dma_unmap_addr(cons_rx_buf, mapping),
+		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+
+	rxr->rx_prod_bseq += bp->rx_buf_use_size;
+
+	prod_rx_buf->skb = skb;
+	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+
+	if (cons == prod)
+		return;
+
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			dma_unmap_addr(cons_rx_buf, mapping));
+
+	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
+	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
+	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
+	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
+}
+
+static int
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
+	    u32 ring_idx)
+{
+	int err;
+	u16 prod = ring_idx & 0xffff;
+
+	err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+	if (unlikely(err)) {
+		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+		if (hdr_len) {
+			unsigned int raw_len = len + 4;
+			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
+
+			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
+		}
+		return err;
+	}
+
+	skb_reserve(skb, BNX2_RX_OFFSET);
+	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
+			 PCI_DMA_FROMDEVICE);
+
+	if (hdr_len == 0) {
+		skb_put(skb, len);
+		return 0;
+	} else {
+		unsigned int i, frag_len, frag_size, pages;
+		struct sw_pg *rx_pg;
+		u16 pg_cons = rxr->rx_pg_cons;
+		u16 pg_prod = rxr->rx_pg_prod;
+
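+		/* frag_size includes the 4-byte FCS that follows the
+		 * packet; it is trimmed from the last page below.
+		 */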
+		frag_size = len + 4 - hdr_len;
+		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
+		skb_put(skb, hdr_len);
+
+		for (i = 0; i < pages; i++) {
+			dma_addr_t mapping_old;
+
+			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
+			if (unlikely(frag_len <= 4)) {
+				unsigned int tail = 4 - frag_len;
+
+				rxr->rx_pg_cons = pg_cons;
+				rxr->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
+							pages - i);
+				skb->len -= tail;
+				if (i == 0) {
+					skb->tail -= tail;
+				} else {
+					skb_frag_t *frag =
+						&skb_shinfo(skb)->frags[i - 1];
+					frag->size -= tail;
+					skb->data_len -= tail;
+					skb->truesize -= tail;
+				}
+				return 0;
+			}
+			rx_pg = &rxr->rx_pg_ring[pg_cons];
+
+			/* Don't unmap yet.  If we're unable to allocate a new
+			 * page, we need to recycle the page and the DMA addr.
+			 */
+			mapping_old = dma_unmap_addr(rx_pg, mapping);
+			if (i == pages - 1)
+				frag_len -= 4;
+
+			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
+			rx_pg->page = NULL;
+
+			err = bnx2_alloc_rx_page(bp, rxr,
+						 RX_PG_RING_IDX(pg_prod),
+						 GFP_ATOMIC);
+			if (unlikely(err)) {
+				rxr->rx_pg_cons = pg_cons;
+				rxr->rx_pg_prod = pg_prod;
+				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
+							pages - i);
+				return err;
+			}
+
+			dma_unmap_page(&bp->pdev->dev, mapping_old,
+				       PAGE_SIZE, PCI_DMA_FROMDEVICE);
+
+			frag_size -= frag_len;
+			skb->data_len += frag_len;
+			skb->truesize += frag_len;
+			skb->len += frag_len;
+
+			pg_prod = NEXT_RX_BD(pg_prod);
+			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
+		}
+		rxr->rx_pg_prod = pg_prod;
+		rxr->rx_pg_cons = pg_cons;
+	}
+	return 0;
+}
+
+static inline u16
+bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
+{
+	u16 cons;
+
+	/* Tell compiler that status block fields can change. */
+	barrier();
+	cons = *bnapi->hw_rx_cons_ptr;
+	barrier();
+	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
+		cons++;
+	return cons;
+}
+
+static int
+bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
+{
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
+	struct l2_fhdr *rx_hdr;
+	int rx_pkt = 0, pg_ring_used = 0;
+
+	hw_cons = bnx2_get_hw_rx_cons(bnapi);
+	sw_cons = rxr->rx_cons;
+	sw_prod = rxr->rx_prod;
+
+	/* Memory barrier necessary as speculative reads of the rx
+	 * buffer can be ahead of the index in the status block
+	 */
+	rmb();
+	while (sw_cons != hw_cons) {
+		unsigned int len, hdr_len;
+		u32 status;
+		struct sw_bd *rx_buf, *next_rx_buf;
+		struct sk_buff *skb;
+		dma_addr_t dma_addr;
+
+		sw_ring_cons = RX_RING_IDX(sw_cons);
+		sw_ring_prod = RX_RING_IDX(sw_prod);
+
+		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
+		skb = rx_buf->skb;
+		prefetchw(skb);
+
+		next_rx_buf =
+			&rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+		prefetch(next_rx_buf->desc);
+
+		rx_buf->skb = NULL;
+
+		dma_addr = dma_unmap_addr(rx_buf, mapping);
+
+		dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
+			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
+			PCI_DMA_FROMDEVICE);
+
+		rx_hdr = rx_buf->desc;
+		len = rx_hdr->l2_fhdr_pkt_len;
+		status = rx_hdr->l2_fhdr_status;
+
+		hdr_len = 0;
+		if (status & L2_FHDR_STATUS_SPLIT) {
+			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
+			pg_ring_used = 1;
+		} else if (len > bp->rx_jumbo_thresh) {
+			hdr_len = bp->rx_jumbo_thresh;
+			pg_ring_used = 1;
+		}
+
+		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
+				       L2_FHDR_ERRORS_PHY_DECODE |
+				       L2_FHDR_ERRORS_ALIGNMENT |
+				       L2_FHDR_ERRORS_TOO_SHORT |
+				       L2_FHDR_ERRORS_GIANT_FRAME))) {
+
+			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+					  sw_ring_prod);
+			if (pg_ring_used) {
+				int pages;
+
+				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
+
+				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
+			}
+			goto next_rx;
+		}
+
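+		/* Strip the 4-byte FCS. */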
+		len -= 4;
+
+		if (len <= bp->rx_copy_thresh) {
+			struct sk_buff *new_skb;
+
+			new_skb = netdev_alloc_skb(bp->dev, len + 6);
+			if (new_skb == NULL) {
+				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+						  sw_ring_prod);
+				goto next_rx;
+			}
+
+			/* aligned copy */
+			skb_copy_from_linear_data_offset(skb,
+							 BNX2_RX_OFFSET - 6,
+				      new_skb->data, len + 6);
+			skb_reserve(new_skb, 6);
+			skb_put(new_skb, len);
+
+			bnx2_reuse_rx_skb(bp, rxr, skb,
+				sw_ring_cons, sw_ring_prod);
+
+			skb = new_skb;
+		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
+			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
+			goto next_rx;
+
+		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
+		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
+			__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
+
+		skb->protocol = eth_type_trans(skb, bp->dev);
+
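+		/* Drop frames longer than the MTU unless they carry a
+		 * VLAN tag (ethertype 0x8100), which accounts for the
+		 * extra bytes.
+		 */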
+		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
+			(ntohs(skb->protocol) != 0x8100)) {
+
+			dev_kfree_skb(skb);
+			goto next_rx;
+
+		}
+
+		skb_checksum_none_assert(skb);
+		if ((bp->dev->features & NETIF_F_RXCSUM) &&
+			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
+			L2_FHDR_STATUS_UDP_DATAGRAM))) {
+
+			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
+					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+		}
+		if ((bp->dev->features & NETIF_F_RXHASH) &&
+		    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
+		     L2_FHDR_STATUS_USE_RXHASH))
+			skb->rxhash = rx_hdr->l2_fhdr_hash;
+
+		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
+		napi_gro_receive(&bnapi->napi, skb);
+		rx_pkt++;
+
+next_rx:
+		sw_cons = NEXT_RX_BD(sw_cons);
+		sw_prod = NEXT_RX_BD(sw_prod);
+
+		if (rx_pkt == budget)
+			break;
+
+		/* Refresh hw_cons to see if there is new work */
+		if (sw_cons == hw_cons) {
+			hw_cons = bnx2_get_hw_rx_cons(bnapi);
+			rmb();
+		}
+	}
+	rxr->rx_cons = sw_cons;
+	rxr->rx_prod = sw_prod;
+
+	if (pg_ring_used)
+		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+
+	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
+
+	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+
+	mmiowb();
+
+	return rx_pkt;
+}
+
+/* MSI ISR - The only difference between this and the INTx ISR
+ * is that the MSI interrupt is always serviced.
+ */
+static irqreturn_t
+bnx2_msi(int irq, void *dev_instance)
+{
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+
+	prefetch(bnapi->status_blk.msi);
+	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+	/* Return here if interrupt is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	napi_schedule(&bnapi->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnx2_msi_1shot(int irq, void *dev_instance)
+{
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+
+	prefetch(bnapi->status_blk.msi);
+
+	/* Return here if interrupt is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	napi_schedule(&bnapi->napi);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t
+bnx2_interrupt(int irq, void *dev_instance)
+{
+	struct bnx2_napi *bnapi = dev_instance;
+	struct bnx2 *bp = bnapi->bp;
+	struct status_block *sblk = bnapi->status_blk.msi;
+
+	/* When using INTx, it is possible for the interrupt to arrive
+	 * at the CPU before the status block write posted prior to the
+	 * interrupt. Reading a register will flush the status block.
+	 * When using MSI, the MSI message will always complete after
+	 * the status block write.
+	 */
+	if ((sblk->status_idx == bnapi->last_status_idx) &&
+	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
+	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
+		return IRQ_NONE;
+
+	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+	/* Read back to deassert IRQ immediately to avoid too many
+	 * spurious interrupts.
+	 */
+	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
+
+	/* Return here if interrupt is shared and is disabled. */
+	if (unlikely(atomic_read(&bp->intr_sem) != 0))
+		return IRQ_HANDLED;
+
+	if (napi_schedule_prep(&bnapi->napi)) {
+		bnapi->last_status_idx = sblk->status_idx;
+		__napi_schedule(&bnapi->napi);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static inline int
+bnx2_has_fast_work(struct bnx2_napi *bnapi)
+{
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
+	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
+		return 1;
+	return 0;
+}
+
+#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
+				 STATUS_ATTN_BITS_TIMER_ABORT)
+
+static inline int
+bnx2_has_work(struct bnx2_napi *bnapi)
+{
+	struct status_block *sblk = bnapi->status_blk.msi;
+
+	if (bnx2_has_fast_work(bnapi))
+		return 1;
+
+#ifdef BCM_CNIC
+	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+		return 1;
+#endif
+
+	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
+	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
+		return 1;
+
+	return 0;
+}
+
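+/* Workaround for missed MSIs.  If work is pending but the status index
+ * has not moved since the last idle check, briefly toggle the MSI
+ * enable bit and run the handler by hand.
+ */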
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+	u32 msi_ctrl;
+
+	if (bnx2_has_work(bnapi)) {
+		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+			return;
+
+		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+		}
+	}
+
+	bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	struct cnic_ops *c_ops;
+
+	if (!bnapi->cnic_present)
+		return;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+						      bnapi->status_blk.msi);
+	rcu_read_unlock();
+}
+#endif
+
+static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+	struct status_block *sblk = bnapi->status_blk.msi;
+	u32 status_attn_bits = sblk->status_attn_bits;
+	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
+
+	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
+	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
+
+		bnx2_phy_int(bp, bnapi);
+
+		/* This is needed to take care of transient status
+		 * during link changes.
+		 */
+		REG_WR(bp, BNX2_HC_COMMAND,
+		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+		REG_RD(bp, BNX2_HC_COMMAND);
+	}
+}
+
+static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
+			  int work_done, int budget)
+{
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
+		bnx2_tx_int(bp, bnapi, 0);
+
+	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
+		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
+
+	return work_done;
+}
+
+static int bnx2_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+	struct bnx2 *bp = bnapi->bp;
+	int work_done = 0;
+	struct status_block_msix *sblk = bnapi->status_blk.msix;
+
+	while (1) {
+		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+		if (unlikely(work_done >= budget))
+			break;
+
+		bnapi->last_status_idx = sblk->status_idx;
+		/* status idx must be read before checking for more work. */
+		rmb();
+		if (likely(!bnx2_has_fast_work(bnapi))) {
+
+			napi_complete(napi);
+			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
+			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+			       bnapi->last_status_idx);
+			break;
+		}
+	}
+	return work_done;
+}
+
+static int bnx2_poll(struct napi_struct *napi, int budget)
+{
+	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
+	struct bnx2 *bp = bnapi->bp;
+	int work_done = 0;
+	struct status_block *sblk = bnapi->status_blk.msi;
+
+	while (1) {
+		bnx2_poll_link(bp, bnapi);
+
+		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
+
+#ifdef BCM_CNIC
+		bnx2_poll_cnic(bp, bnapi);
+#endif
+
+		/* bnapi->last_status_idx is used below to tell the hw how
+		 * much work has been processed, so we must read it before
+		 * checking for more work.
+		 */
+		bnapi->last_status_idx = sblk->status_idx;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		rmb();
+		if (likely(!bnx2_has_work(bnapi))) {
+			napi_complete(napi);
+			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
+				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+				       bnapi->last_status_idx);
+				break;
+			}
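+			/* INTx path: ack with MASK_INT set first, then
+			 * write again without it, presumably to re-enable
+			 * the interrupt only after the new index is visible.
+			 */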
+			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
+			       bnapi->last_status_idx);
+
+			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
+			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
+			       bnapi->last_status_idx);
+			break;
+		}
+	}
+
+	return work_done;
+}
+
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
+ * from set_multicast.
+ */
+static void
+bnx2_set_rx_mode(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u32 rx_mode, sort_mode;
+	struct netdev_hw_addr *ha;
+	int i;
+
+	if (!netif_running(dev))
+		return;
+
+	spin_lock_bh(&bp->phy_lock);
+
+	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
+				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
+	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
+	if (!(dev->features & NETIF_F_HW_VLAN_RX) &&
+	     (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
+	if (dev->flags & IFF_PROMISC) {
+		/* Promiscuous mode. */
+		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+			     BNX2_RPM_SORT_USER0_PROM_VLAN;
+	} else if (dev->flags & IFF_ALLMULTI) {
+		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+			       0xffffffff);
+		}
+		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
+	} else {
+		/* Accept one or more multicast(s). */
+		u32 mc_filter[NUM_MC_HASH_REGISTERS];
+		u32 regidx;
+		u32 bit;
+		u32 crc;
+
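+		/* CRC32 hash: bits 7:5 of the CRC select one of the eight
+		 * hash registers and bits 4:0 select the bit within it.
+		 */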
+		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
+
+		netdev_for_each_mc_addr(ha, dev) {
+			crc = ether_crc_le(ETH_ALEN, ha->addr);
+			bit = crc & 0xff;
+			regidx = (bit & 0xe0) >> 5;
+			bit &= 0x1f;
+			mc_filter[regidx] |= (1 << bit);
+		}
+
+		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+			       mc_filter[i]);
+		}
+
+		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
+	}
+
+	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
+		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
+		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
+			     BNX2_RPM_SORT_USER0_PROM_VLAN;
+	} else if (!(dev->flags & IFF_PROMISC)) {
+		/* Add all entries to the match filter list */
+		i = 0;
+		netdev_for_each_uc_addr(ha, dev) {
+			bnx2_set_mac_addr(bp, ha->addr,
+					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
+			sort_mode |= (1 <<
+				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
+			i++;
+		}
+	}
+
+	if (rx_mode != bp->rx_mode) {
+		bp->rx_mode = rx_mode;
+		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
+	}
+
+	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
+	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
+
+	spin_unlock_bh(&bp->phy_lock);
+}
+
+static int __devinit
+check_fw_section(const struct firmware *fw,
+		 const struct bnx2_fw_file_section *section,
+		 u32 alignment, bool non_empty)
+{
+	u32 offset = be32_to_cpu(section->offset);
+	u32 len = be32_to_cpu(section->len);
+
+	if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
+		return -EINVAL;
+	if ((non_empty && len == 0) || len > fw->size - offset ||
+	    len & (alignment - 1))
+		return -EINVAL;
+	return 0;
+}
+
+static int __devinit
+check_mips_fw_entry(const struct firmware *fw,
+		    const struct bnx2_mips_fw_file_entry *entry)
+{
+	if (check_fw_section(fw, &entry->text, 4, true) ||
+	    check_fw_section(fw, &entry->data, 4, false) ||
+	    check_fw_section(fw, &entry->rodata, 4, false))
+		return -EINVAL;
+	return 0;
+}
+
+static int __devinit
+bnx2_request_firmware(struct bnx2 *bp)
+{
+	const char *mips_fw_file, *rv2p_fw_file;
+	const struct bnx2_mips_fw_file *mips_fw;
+	const struct bnx2_rv2p_fw_file *rv2p_fw;
+	int rc;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		mips_fw_file = FW_MIPS_FILE_09;
+		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
+		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
+			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
+		else
+			rv2p_fw_file = FW_RV2P_FILE_09;
+	} else {
+		mips_fw_file = FW_MIPS_FILE_06;
+		rv2p_fw_file = FW_RV2P_FILE_06;
+	}
+
+	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
+	if (rc) {
+		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
+		return rc;
+	}
+
+	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
+	if (rc) {
+		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
+		return rc;
+	}
+	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
+	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
+	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
+	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
+		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
+		return -EINVAL;
+	}
+	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
+	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
+	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
+		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
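+/* Patch an RV2P instruction at load time.  Only the BD page size field
+ * is currently rewritten, to match the host page size.
+ */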
+static u32
+rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
+{
+	switch (idx) {
+	case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
+		rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
+		rv2p_code |= RV2P_BD_PAGE_SIZE;
+		break;
+	}
+	return rv2p_code;
+}
+
+static int
+load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
+	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
+{
+	u32 rv2p_code_len, file_offset;
+	__be32 *rv2p_code;
+	int i;
+	u32 val, cmd, addr;
+
+	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
+	file_offset = be32_to_cpu(fw_entry->rv2p.offset);
+
+	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
+
+	if (rv2p_proc == RV2P_PROC1) {
+		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
+		addr = BNX2_RV2P_PROC1_ADDR_CMD;
+	} else {
+		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
+		addr = BNX2_RV2P_PROC2_ADDR_CMD;
+	}
+
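+	/* Each RV2P instruction is 8 bytes: write the high and low words,
+	 * then latch them at index i/8 with the write command.
+	 */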
+	for (i = 0; i < rv2p_code_len; i += 8) {
+		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
+		rv2p_code++;
+		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
+		rv2p_code++;
+
+		val = (i / 8) | cmd;
+		REG_WR(bp, addr, val);
+	}
+
+	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
+	for (i = 0; i < 8; i++) {
+		u32 loc, code;
+
+		loc = be32_to_cpu(fw_entry->fixup[i]);
+		if (loc && ((loc * 4) < rv2p_code_len)) {
+			code = be32_to_cpu(*(rv2p_code + loc - 1));
+			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
+			code = be32_to_cpu(*(rv2p_code + loc));
+			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
+			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
+
+			val = (loc / 2) | cmd;
+			REG_WR(bp, addr, val);
+		}
+	}
+
+	/* Reset the processor, un-stall is done later. */
+	if (rv2p_proc == RV2P_PROC1)
+		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
+	else
+		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
+
+	return 0;
+}
+
+static int
+load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
+	    const struct bnx2_mips_fw_file_entry *fw_entry)
+{
+	u32 addr, len, file_offset;
+	__be32 *data;
+	u32 offset;
+	u32 val;
+
+	/* Halt the CPU. */
+	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
+	val |= cpu_reg->mode_value_halt;
+	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
+	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
+
+	/* Load the Text area. */
+	addr = be32_to_cpu(fw_entry->text.addr);
+	len = be32_to_cpu(fw_entry->text.len);
+	file_offset = be32_to_cpu(fw_entry->text.offset);
+	data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+	if (len) {
+		int j;
+
+		for (j = 0; j < (len / 4); j++, offset += 4)
+			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+	}
+
+	/* Load the Data area. */
+	addr = be32_to_cpu(fw_entry->data.addr);
+	len = be32_to_cpu(fw_entry->data.len);
+	file_offset = be32_to_cpu(fw_entry->data.offset);
+	data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+	if (len) {
+		int j;
+
+		for (j = 0; j < (len / 4); j++, offset += 4)
+			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+	}
+
+	/* Load the Read-Only area. */
+	addr = be32_to_cpu(fw_entry->rodata.addr);
+	len = be32_to_cpu(fw_entry->rodata.len);
+	file_offset = be32_to_cpu(fw_entry->rodata.offset);
+	data = (__be32 *)(bp->mips_firmware->data + file_offset);
+
+	offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
+	if (len) {
+		int j;
+
+		for (j = 0; j < (len / 4); j++, offset += 4)
+			bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
+	}
+
+	/* Clear the pre-fetch instruction. */
+	bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
+
+	val = be32_to_cpu(fw_entry->start_addr);
+	bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
+
+	/* Start the CPU. */
+	val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
+	val &= ~cpu_reg->mode_value_halt;
+	bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
+	bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
+
+	return 0;
+}
+
+static int
+bnx2_init_cpus(struct bnx2 *bp)
+{
+	const struct bnx2_mips_fw_file *mips_fw =
+		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
+	const struct bnx2_rv2p_fw_file *rv2p_fw =
+		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
+	int rc;
+
+	/* Initialize the RV2P processor. */
+	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
+	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
+
+	/* Initialize the RX Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the TX Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the TX Patch-up Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the Completion Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
+	if (rc)
+		goto init_cpu_err;
+
+	/* Initialize the Command Processor. */
+	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
+
+init_cpu_err:
+	return rc;
+}
+
+static int
+bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
+{
+	u16 pmcsr;
+
+	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+	switch (state) {
+	case PCI_D0: {
+		u32 val;
+
+		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+			PCI_PM_CTRL_PME_STATUS);
+
+		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+			/* delay required during transition out of D3hot */
+			msleep(20);
+
+		val = REG_RD(bp, BNX2_EMAC_MODE);
+		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
+		val &= ~BNX2_EMAC_MODE_MPKT;
+		REG_WR(bp, BNX2_EMAC_MODE, val);
+
+		val = REG_RD(bp, BNX2_RPM_CONFIG);
+		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+		REG_WR(bp, BNX2_RPM_CONFIG, val);
+		break;
+	}
+	case PCI_D3hot: {
+		int i;
+		u32 val, wol_msg;
+
+		if (bp->wol) {
+			u32 advertising;
+			u8 autoneg;
+
+			autoneg = bp->autoneg;
+			advertising = bp->advertising;
+
+			if (bp->phy_port == PORT_TP) {
+				bp->autoneg = AUTONEG_SPEED;
+				bp->advertising = ADVERTISED_10baseT_Half |
+					ADVERTISED_10baseT_Full |
+					ADVERTISED_100baseT_Half |
+					ADVERTISED_100baseT_Full |
+					ADVERTISED_Autoneg;
+			}
+
+			spin_lock_bh(&bp->phy_lock);
+			bnx2_setup_phy(bp, bp->phy_port);
+			spin_unlock_bh(&bp->phy_lock);
+
+			bp->autoneg = autoneg;
+			bp->advertising = advertising;
+
+			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+			val = REG_RD(bp, BNX2_EMAC_MODE);
+
+			/* Enable port mode. */
+			val &= ~BNX2_EMAC_MODE_PORT;
+			val |= BNX2_EMAC_MODE_MPKT_RCVD |
+			       BNX2_EMAC_MODE_ACPI_RCVD |
+			       BNX2_EMAC_MODE_MPKT;
+			if (bp->phy_port == PORT_TP)
+				val |= BNX2_EMAC_MODE_PORT_MII;
+			else {
+				val |= BNX2_EMAC_MODE_PORT_GMII;
+				if (bp->line_speed == SPEED_2500)
+					val |= BNX2_EMAC_MODE_25G_MODE;
+			}
+
+			REG_WR(bp, BNX2_EMAC_MODE, val);
+
+			/* receive all multicast */
+			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
+				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
+				       0xffffffff);
+			}
+			REG_WR(bp, BNX2_EMAC_RX_MODE,
+			       BNX2_EMAC_RX_MODE_SORT_MODE);
+
+			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
+			      BNX2_RPM_SORT_USER0_MC_EN;
+			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
+			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
+			       BNX2_RPM_SORT_USER0_ENA);
+
+			/* Need to enable EMAC and RPM for WOL. */
+			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
+			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
+			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
+
+			val = REG_RD(bp, BNX2_RPM_CONFIG);
+			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
+			REG_WR(bp, BNX2_RPM_CONFIG, val);
+
+			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+		} else {
+			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+		}
+
+		if (!(bp->flags & BNX2_FLAG_NO_WOL))
+			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
+				     1, 0);
+
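+		/* Power state value 3 in PMCSR selects D3hot.  5706 A0/A1
+		 * parts are only placed in D3hot when WOL is enabled.
+		 */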
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
+			if (bp->wol)
+				pmcsr |= 3;
+		} else {
+			pmcsr |= 3;
+		}
+		if (bp->wol)
+			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+				      pmcsr);
+
+		/* No more memory access after this point until
+		 * device is brought back to D0.
+		 */
+		udelay(50);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+bnx2_acquire_nvram_lock(struct bnx2 *bp)
+{
+	u32 val;
+	int j;
+
+	/* Request access to the flash interface. */
+	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		val = REG_RD(bp, BNX2_NVM_SW_ARB);
+		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
+			break;
+
+		udelay(5);
+	}
+
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_release_nvram_lock(struct bnx2 *bp)
+{
+	int j;
+	u32 val;
+
+	/* Relinquish nvram interface. */
+	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
+
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		val = REG_RD(bp, BNX2_NVM_SW_ARB);
+		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
+			break;
+
+		udelay(5);
+	}
+
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_enable_nvram_write(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, BNX2_MISC_CFG);
+	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
+
+	if (bp->flash_info->flags & BNX2_NV_WREN) {
+		int j;
+
+		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+		REG_WR(bp, BNX2_NVM_COMMAND,
+		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
+
+		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+			udelay(5);
+
+			val = REG_RD(bp, BNX2_NVM_COMMAND);
+			if (val & BNX2_NVM_COMMAND_DONE)
+				break;
+		}
+
+		if (j >= NVRAM_TIMEOUT_COUNT)
+			return -EBUSY;
+	}
+	return 0;
+}
+
+static void
+bnx2_disable_nvram_write(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, BNX2_MISC_CFG);
+	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
+}
+
+static void
+bnx2_enable_nvram_access(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+	/* Enable both bits, even on read. */
+	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+	       val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
+}
+
+static void
+bnx2_disable_nvram_access(struct bnx2 *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
+	/* Disable both bits, even after read. */
+	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
+		val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
+			BNX2_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static int
+bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
+{
+	u32 cmd;
+	int j;
+
+	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
+		/* Buffered flash, no erase needed */
+		return 0;
+
+	/* Build an erase command */
+	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
+	      BNX2_NVM_COMMAND_DOIT;
+
+	/* Need to clear DONE bit separately. */
+	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+	/* Address of the NVRAM page to erase. */
+	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+	/* Issue an erase command. */
+	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+	/* Wait for completion. */
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		u32 val;
+
+		udelay(5);
+
+		val = REG_RD(bp, BNX2_NVM_COMMAND);
+		if (val & BNX2_NVM_COMMAND_DONE)
+			break;
+	}
+
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
+{
+	u32 cmd;
+	int j;
+
+	/* Build the command word. */
+	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
+
+	/* Translate the offset for buffered flash; not needed on the 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
+		offset = ((offset / bp->flash_info->page_size) <<
+			   bp->flash_info->page_bits) +
+			  (offset % bp->flash_info->page_size);
+	}
+
+	/* Need to clear DONE bit separately. */
+	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+	/* Address of the NVRAM to read from. */
+	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+	/* Issue a read command. */
+	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+	/* Wait for completion. */
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		u32 val;
+
+		udelay(5);
+
+		val = REG_RD(bp, BNX2_NVM_COMMAND);
+		if (val & BNX2_NVM_COMMAND_DONE) {
+			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
+			memcpy(ret_val, &v, 4);
+			break;
+		}
+	}
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
+{
+	u32 cmd;
+	__be32 val32;
+	int j;
+
+	/* Build the command word. */
+	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
+
+	/* Translate the offset for buffered flash; not needed on the 5709. */
+	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
+		offset = ((offset / bp->flash_info->page_size) <<
+			  bp->flash_info->page_bits) +
+			 (offset % bp->flash_info->page_size);
+	}
+
+	/* Need to clear DONE bit separately. */
+	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
+
+	memcpy(&val32, val, 4);
+
+	/* Write the data. */
+	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
+
+	/* Address of the NVRAM to write to. */
+	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
+
+	/* Issue the write command. */
+	REG_WR(bp, BNX2_NVM_COMMAND, cmd);
+
+	/* Wait for completion. */
+	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
+		udelay(5);
+
+		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
+			break;
+	}
+	if (j >= NVRAM_TIMEOUT_COUNT)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int
+bnx2_init_nvram(struct bnx2 *bp)
+{
+	u32 val;
+	int j, entry_count, rc = 0;
+	const struct flash_spec *flash;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		bp->flash_info = &flash_5709;
+		goto get_flash_size;
+	}
+
+	/* Determine the selected interface. */
+	val = REG_RD(bp, BNX2_NVM_CFG1);
+
+	entry_count = ARRAY_SIZE(flash_table);
+
+	if (val & 0x40000000) {
+
+		/* Flash interface has been reconfigured */
+		for (j = 0, flash = &flash_table[0]; j < entry_count;
+		     j++, flash++) {
+			if ((val & FLASH_BACKUP_STRAP_MASK) ==
+			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
+				bp->flash_info = flash;
+				break;
+			}
+		}
+	} else {
+		u32 mask;
+		/* Flash interface has not been reconfigured yet */
+
+		if (val & (1 << 23))
+			mask = FLASH_BACKUP_STRAP_MASK;
+		else
+			mask = FLASH_STRAP_MASK;
+
+		for (j = 0, flash = &flash_table[0]; j < entry_count;
+		     j++, flash++) {
+
+			if ((val & mask) == (flash->strapping & mask)) {
+				bp->flash_info = flash;
+
+				/* Request access to the flash interface. */
+				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+					return rc;
+
+				/* Enable access to flash interface */
+				bnx2_enable_nvram_access(bp);
+
+				/* Reconfigure the flash interface */
+				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
+				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
+				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
+				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
+
+				/* Disable access to flash interface */
+				bnx2_disable_nvram_access(bp);
+				bnx2_release_nvram_lock(bp);
+
+				break;
+			}
+		}
+	} /* if (val & 0x40000000) */
+
+	if (j == entry_count) {
+		bp->flash_info = NULL;
+		pr_alert("Unknown flash/EEPROM type\n");
+		return -ENODEV;
+	}
+
+get_flash_size:
+	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
+	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
+	if (val)
+		bp->flash_size = val;
+	else
+		bp->flash_size = bp->flash_info->total_size;
+
+	return rc;
+}
+
+static int
+bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
+		int buf_size)
+{
+	int rc = 0;
+	u32 cmd_flags, offset32, len32, extra;
+
+	if (buf_size == 0)
+		return 0;
+
+	/* Request access to the flash interface. */
+	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+		return rc;
+
+	/* Enable access to flash interface */
+	bnx2_enable_nvram_access(bp);
+
+	len32 = buf_size;
+	offset32 = offset;
+	extra = 0;
+
+	cmd_flags = 0;
+
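+	/* Handle a start offset that is not dword aligned by reading the
+	 * enclosing dword and copying out only the requested bytes.
+	 */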
+	if (offset32 & 3) {
+		u8 buf[4];
+		u32 pre_len;
+
+		offset32 &= ~3;
+		pre_len = 4 - (offset & 3);
+
+		if (pre_len >= len32) {
+			pre_len = len32;
+			cmd_flags = BNX2_NVM_COMMAND_FIRST |
+				    BNX2_NVM_COMMAND_LAST;
+		} else {
+			cmd_flags = BNX2_NVM_COMMAND_FIRST;
+		}
+
+		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+		if (rc)
+			return rc;
+
+		memcpy(ret_buf, buf + (offset & 3), pre_len);
+
+		offset32 += 4;
+		ret_buf += pre_len;
+		len32 -= pre_len;
+	}
+	if (len32 & 3) {
+		extra = 4 - (len32 & 3);
+		len32 = (len32 + 4) & ~3;
+	}
+
+	if (len32 == 4) {
+		u8 buf[4];
+
+		if (cmd_flags)
+			cmd_flags = BNX2_NVM_COMMAND_LAST;
+		else
+			cmd_flags = BNX2_NVM_COMMAND_FIRST |
+				    BNX2_NVM_COMMAND_LAST;
+
+		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+		memcpy(ret_buf, buf, 4 - extra);
+	} else if (len32 > 0) {
+		u8 buf[4];
+
+		/* Read the first word. */
+		if (cmd_flags)
+			cmd_flags = 0;
+		else
+			cmd_flags = BNX2_NVM_COMMAND_FIRST;
+
+		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
+
+		/* Advance to the next dword. */
+		offset32 += 4;
+		ret_buf += 4;
+		len32 -= 4;
+
+		while (len32 > 4 && rc == 0) {
+			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
+
+			/* Advance to the next dword. */
+			offset32 += 4;
+			ret_buf += 4;
+			len32 -= 4;
+		}
+
+		if (rc)
+			return rc;
+
+		cmd_flags = BNX2_NVM_COMMAND_LAST;
+		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
+
+		memcpy(ret_buf, buf, 4 - extra);
+	}
+
+	/* Disable access to flash interface */
+	bnx2_disable_nvram_access(bp);
+
+	bnx2_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int
+bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
+		int buf_size)
+{
+	u32 written, offset32, len32;
+	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
+	int rc = 0;
+	int align_start, align_end;
+
+	buf = data_buf;
+	offset32 = offset;
+	len32 = buf_size;
+	align_start = align_end = 0;
+
+	if ((align_start = (offset32 & 3))) {
+		offset32 &= ~3;
+		len32 += align_start;
+		if (len32 < 4)
+			len32 = 4;
+		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
+			return rc;
+	}
+
+	if (len32 & 3) {
+		align_end = 4 - (len32 & 3);
+		len32 += align_end;
+		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
+			return rc;
+	}
+
+	if (align_start || align_end) {
+		align_buf = kmalloc(len32, GFP_KERNEL);
+		if (align_buf == NULL)
+			return -ENOMEM;
+		if (align_start)
+			memcpy(align_buf, start, 4);
+		if (align_end)
+			memcpy(align_buf + len32 - 4, end, 4);
+		memcpy(align_buf + align_start, data_buf, buf_size);
+		buf = align_buf;
+	}
+
+	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+		flash_buffer = kmalloc(264, GFP_KERNEL);
+		if (flash_buffer == NULL) {
+			rc = -ENOMEM;
+			goto nvram_write_end;
+		}
+	}
+
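+	/* Write page by page.  For non-buffered flash each page is read,
+	 * erased, and rewritten with the old data preserved around the
+	 * new data.
+	 */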
+	written = 0;
+	while ((written < len32) && (rc == 0)) {
+		u32 page_start, page_end, data_start, data_end;
+		u32 addr, cmd_flags;
+		int i;
+
+		/* Find the page_start addr */
+		page_start = offset32 + written;
+		page_start -= (page_start % bp->flash_info->page_size);
+		/* Find the page_end addr */
+		page_end = page_start + bp->flash_info->page_size;
+		/* Find the data_start addr */
+		data_start = (written == 0) ? offset32 : page_start;
+		/* Find the data_end addr */
+		data_end = (page_end > offset32 + len32) ?
+			(offset32 + len32) : page_end;
+
+		/* Request access to the flash interface. */
+		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
+			goto nvram_write_end;
+
+		/* Enable access to flash interface */
+		bnx2_enable_nvram_access(bp);
+
+		cmd_flags = BNX2_NVM_COMMAND_FIRST;
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+			int j;
+
+			/* Read the whole page into the buffer
+			 * (non-buffered flash only) */
+			for (j = 0; j < bp->flash_info->page_size; j += 4) {
+				if (j == (bp->flash_info->page_size - 4)) {
+					cmd_flags |= BNX2_NVM_COMMAND_LAST;
+				}
+				rc = bnx2_nvram_read_dword(bp,
+					page_start + j,
+					&flash_buffer[j],
+					cmd_flags);
+
+				if (rc)
+					goto nvram_write_end;
+
+				cmd_flags = 0;
+			}
+		}
+
+		/* Enable writes to flash interface (unlock write-protect) */
+		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
+			goto nvram_write_end;
+
+		/* Loop to write back the buffer data from page_start to
+		 * data_start */
+		i = 0;
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+			/* Erase the page */
+			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
+				goto nvram_write_end;
+
+			/* Re-enable the write again for the actual write */
+			bnx2_enable_nvram_write(bp);
+
+			for (addr = page_start; addr < data_start;
+				addr += 4, i += 4) {
+
+				rc = bnx2_nvram_write_dword(bp, addr,
+					&flash_buffer[i], cmd_flags);
+
+				if (rc != 0)
+					goto nvram_write_end;
+
+				cmd_flags = 0;
+			}
+		}
+
+		/* Loop to write the new data from data_start to data_end */
+		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
+			if ((addr == page_end - 4) ||
+				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
+				 (addr == data_end - 4))) {
+
+				cmd_flags |= BNX2_NVM_COMMAND_LAST;
+			}
+			rc = bnx2_nvram_write_dword(bp, addr, buf,
+				cmd_flags);
+
+			if (rc != 0)
+				goto nvram_write_end;
+
+			cmd_flags = 0;
+			buf += 4;
+		}
+
+		/* Loop to write back the buffer data from data_end
+		 * to page_end */
+		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
+			for (addr = data_end; addr < page_end;
+				addr += 4, i += 4) {
+
+				if (addr == page_end - 4)
+					cmd_flags = BNX2_NVM_COMMAND_LAST;
+				rc = bnx2_nvram_write_dword(bp, addr,
+					&flash_buffer[i], cmd_flags);
+
+				if (rc != 0)
+					goto nvram_write_end;
+
+				cmd_flags = 0;
+			}
+		}
+
+		/* Disable writes to flash interface (lock write-protect) */
+		bnx2_disable_nvram_write(bp);
+
+		/* Disable access to flash interface */
+		bnx2_disable_nvram_access(bp);
+		bnx2_release_nvram_lock(bp);
+
+		/* Increment written */
+		written += data_end - data_start;
+	}
+
+nvram_write_end:
+	kfree(flash_buffer);
+	kfree(align_buf);
+	return rc;
+}
+
+static void
+bnx2_init_fw_cap(struct bnx2 *bp)
+{
+	u32 val, sig = 0;
+
+	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
+	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
+
+	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
+		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+
+	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
+	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
+		return;
+
+	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
+		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
+		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
+	}
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
+	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
+		u32 link;
+
+		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
+
+		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
+		if (link & BNX2_LINK_STATUS_SERDES_LINK)
+			bp->phy_port = PORT_FIBRE;
+		else
+			bp->phy_port = PORT_TP;
+
+		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
+		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
+	}
+
+	if (netif_running(bp->dev) && sig)
+		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
+}
+
+static void
+bnx2_setup_msix_tbl(struct bnx2 *bp)
+{
+	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
+
+	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
+	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
+}
+
+static int
+bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
+{
+	u32 val;
+	int i, rc = 0;
+	u8 old_port;
+
+	/* Wait for the current PCI transaction to complete before
+	 * issuing a reset. */
+	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+		udelay(5);
+	} else {  /* 5709 */
+		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+
+		for (i = 0; i < 100; i++) {
+			msleep(1);
+			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
+				break;
+		}
+	}
+
+	/* Wait for the firmware to tell us it is ok to issue a reset. */
+	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
+
+	/* Deposit a driver reset signature so the firmware knows that
+	 * this is a soft reset. */
+	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
+		      BNX2_DRV_RESET_SIGNATURE_MAGIC);
+
+	/* Do a dummy read to force the chip to complete all current
+	 * transactions before we issue a reset.
+	 */
+	val = REG_RD(bp, BNX2_MISC_ID);
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
+		REG_RD(bp, BNX2_MISC_COMMAND);
+		udelay(5);
+
+		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+	} else {
+		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
+
+		/* Chip reset. */
+		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
+
+		/* Reading back any register after chip reset will hang the
+		 * bus on 5706 A0 and A1.  The msleep below provides plenty
+		 * of margin for write posting.
+		 */
+		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
+			msleep(20);
+
+		/* Reset takes approximately 30 usec */
+		for (i = 0; i < 10; i++) {
+			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
+			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
+				break;
+			udelay(10);
+		}
+
+		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
+			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
+			pr_err("Chip reset did not complete\n");
+			return -EBUSY;
+		}
+	}
+
+	/* Make sure byte swapping is properly configured. */
+	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
+	if (val != 0x01020304) {
+		pr_err("Chip not in correct endian mode\n");
+		return -ENODEV;
+	}
+
+	/* Wait for the firmware to finish its initialization. */
+	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
+	if (rc)
+		return rc;
+
+	spin_lock_bh(&bp->phy_lock);
+	old_port = bp->phy_port;
+	bnx2_init_fw_cap(bp);
+	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
+	    old_port != bp->phy_port)
+		bnx2_set_default_remote_link(bp);
+	spin_unlock_bh(&bp->phy_lock);
+
+	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+		/* Adjust the voltage regulator two steps lower.  The
+		 * default value of this register is 0x0000000e. */
+		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
+
+		/* Remove bad rbuf memory from the free pool. */
+		rc = bnx2_alloc_bad_rbuf(bp);
+	}
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		bnx2_setup_msix_tbl(bp);
+		/* Prevent MSI-X table reads and writes from timing out */
+		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
+			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
+	}
+
+	return rc;
+}
+
+static int
+bnx2_init_chip(struct bnx2 *bp)
+{
+	u32 val, mtu;
+	int rc, i;
+
+	/* Make sure the interrupt is not active. */
+	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+
+	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
+	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
+#ifdef __BIG_ENDIAN
+	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
+#endif
+	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
+	      DMA_READ_CHANS << 12 |
+	      DMA_WRITE_CHANS << 16;
+
+	val |= (0x2 << 20) | (1 << 11);
+
+	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
+		val |= (1 << 23);
+
+	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
+	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
+		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
+
+	REG_WR(bp, BNX2_DMA_CONFIG, val);
+
+	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+		val = REG_RD(bp, BNX2_TDMA_CONFIG);
+		val |= BNX2_TDMA_CONFIG_ONE_DMA;
+		REG_WR(bp, BNX2_TDMA_CONFIG, val);
+	}
+
+	if (bp->flags & BNX2_FLAG_PCIX) {
+		u16 val16;
+
+		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
+				     &val16);
+		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
+				      val16 & ~PCI_X_CMD_ERO);
+	}
+
+	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
+	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
+	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
+	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
+
+	/* Initialize context mapping and zero out the quick contexts.  The
+	 * context block must have already been enabled. */
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		rc = bnx2_init_5709_context(bp);
+		if (rc)
+			return rc;
+	} else
+		bnx2_init_context(bp);
+
+	if ((rc = bnx2_init_cpus(bp)) != 0)
+		return rc;
+
+	bnx2_init_nvram(bp);
+
+	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+	val = REG_RD(bp, BNX2_MQ_CONFIG);
+	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			val |= BNX2_MQ_CONFIG_HALT_DIS;
+	}
+
+	REG_WR(bp, BNX2_MQ_CONFIG, val);
+
+	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
+	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
+	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
+
+	val = (BCM_PAGE_BITS - 8) << 24;
+	REG_WR(bp, BNX2_RV2P_CONFIG, val);
+
+	/* Configure page size. */
+	val = REG_RD(bp, BNX2_TBDR_CONFIG);
+	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
+	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
+	REG_WR(bp, BNX2_TBDR_CONFIG, val);
+
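+	/* Derive the Ethernet backoff seed from the MAC address,
+	 * presumably so different ports do not pick identical backoff
+	 * slots.
+	 */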
+	val = bp->mac_addr[0] +
+	      (bp->mac_addr[1] << 8) +
+	      (bp->mac_addr[2] << 16) +
+	      bp->mac_addr[3] +
+	      (bp->mac_addr[4] << 8) +
+	      (bp->mac_addr[5] << 16);
+	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
+
+	/* Program the MTU.  Also include 4 bytes for CRC32. */
+	mtu = bp->dev->mtu;
+	val = mtu + ETH_HLEN + ETH_FCS_LEN;
+	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
+		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
+	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
+
+	if (mtu < 1500)
+		mtu = 1500;
+
+	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
+	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
+	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
+
+	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
+		bp->bnx2_napi[i].last_status_idx = 0;
+
+	bp->idle_chk_status_idx = 0xffff;
+
+	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
+
+	/* Set up how to generate a link change interrupt. */
+	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
+
+	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
+	       (u64) bp->status_blk_mapping & 0xffffffff);
+	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
+
+	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
+	       (u64) bp->stats_blk_mapping & 0xffffffff);
+	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
+	       (u64) bp->stats_blk_mapping >> 32);
+
+	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
+	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
+
+	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
+	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
+
+	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
+	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
+
+	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
+
+	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
+
+	REG_WR(bp, BNX2_HC_COM_TICKS,
+	       (bp->com_ticks_int << 16) | bp->com_ticks);
+
+	REG_WR(bp, BNX2_HC_CMD_TICKS,
+	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
+
+	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
+		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
+	else
+		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
+	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
+
+	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
+		val = BNX2_HC_CONFIG_COLLECT_STATS;
+	else {
+		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
+		      BNX2_HC_CONFIG_COLLECT_STATS;
+	}
+
+	if (bp->flags & BNX2_FLAG_USING_MSIX) {
+		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
+		       BNX2_HC_MSIX_BIT_VECTOR_VAL);
+
+		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
+	}
+
+	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
+		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
+
+	REG_WR(bp, BNX2_HC_CONFIG, val);
+
+	if (bp->rx_ticks < 25)
+		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
+	else
+		bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
+
+	for (i = 1; i < bp->irq_nvecs; i++) {
+		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+			   BNX2_HC_SB_CONFIG_1;
+
+		REG_WR(bp, base,
+			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
+			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
+			BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
+			(bp->tx_quick_cons_trip_int << 16) |
+			 bp->tx_quick_cons_trip);
+
+		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
+			(bp->tx_ticks_int << 16) | bp->tx_ticks);
+
+		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
+		       (bp->rx_quick_cons_trip_int << 16) |
+			bp->rx_quick_cons_trip);
+
+		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
+			(bp->rx_ticks_int << 16) | bp->rx_ticks);
+	}
+
+	/* Clear internal stats counters. */
+	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
+
+	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
+
+	/* Initialize the receive filter. */
+	bnx2_set_rx_mode(bp->dev);
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+	}
+	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
+			  1, 0);
+
+	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
+	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
+
+	udelay(20);
+
+	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
+
+	return rc;
+}
+
+static void
+bnx2_clear_ring_states(struct bnx2 *bp)
+{
+	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
+	struct bnx2_rx_ring_info *rxr;
+	int i;
+
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+		bnapi = &bp->bnx2_napi[i];
+		txr = &bnapi->tx_ring;
+		rxr = &bnapi->rx_ring;
+
+		txr->tx_cons = 0;
+		txr->hw_tx_cons = 0;
+		rxr->rx_prod_bseq = 0;
+		rxr->rx_prod = 0;
+		rxr->rx_cons = 0;
+		rxr->rx_pg_prod = 0;
+		rxr->rx_pg_cons = 0;
+	}
+}
+
+static void
+bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
+{
+	u32 val, offset0, offset1, offset2, offset3;
+	u32 cid_addr = GET_CID_ADDR(cid);
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	bnx2_ctx_wr(bp, cid_addr, offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	bnx2_ctx_wr(bp, cid_addr, offset1, val);
+
+	val = (u64) txr->tx_desc_mapping >> 32;
+	bnx2_ctx_wr(bp, cid_addr, offset2, val);
+
+	val = (u64) txr->tx_desc_mapping & 0xffffffff;
+	bnx2_ctx_wr(bp, cid_addr, offset3, val);
+}
+
+static void
+bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
+{
+	struct tx_bd *txbd;
+	u32 cid = TX_CID;
+	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
+
+	bnapi = &bp->bnx2_napi[ring_num];
+	txr = &bnapi->tx_ring;
+
+	if (ring_num == 0)
+		cid = TX_CID;
+	else
+		cid = TX_TSS_CID + ring_num - 1;
+
+	bp->tx_wake_thresh = bp->tx_ring_size / 2;
+
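+	/* The BD past the end of the page is a chain BD pointing back to
+	 * the start of the (single-page) TX ring.
+	 */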
+	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
+
+	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
+	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
+
+	txr->tx_prod = 0;
+	txr->tx_prod_bseq = 0;
+
+	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
+	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
+
+	bnx2_init_tx_context(bp, cid, txr);
+}
+
+static void
+bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
+		     int num_rings)
+{
+	int i;
+	struct rx_bd *rxbd;
+
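+	/* The last BD of every page is a chain BD; link each page to the
+	 * next one and wrap the final page back to the first.
+	 */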
+	for (i = 0; i < num_rings; i++) {
+		int j;
+
+		rxbd = &rx_ring[i][0];
+		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
+			rxbd->rx_bd_len = buf_size;
+			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+		}
+		if (i == (num_rings - 1))
+			j = 0;
+		else
+			j = i + 1;
+		rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
+	}
+}
+
+static void
+bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
+{
+	int i;
+	u16 prod, ring_prod;
+	u32 cid, rx_cid_addr, val;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	if (ring_num == 0)
+		cid = RX_CID;
+	else
+		cid = RX_RSS_CID + ring_num - 1;
+
+	rx_cid_addr = GET_CID_ADDR(cid);
+
+	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
+			     bp->rx_buf_use_size, bp->rx_max_ring);
+
+	bnx2_init_rx_context(bp, cid);
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
+		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
+	}
+
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
+	if (bp->rx_pg_ring_size) {
+		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
+				     rxr->rx_pg_desc_mapping,
+				     PAGE_SIZE, bp->rx_max_pg_ring);
+		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
+		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
+
+		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
+
+		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
+		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
+
+		if (CHIP_NUM(bp) == CHIP_NUM_5709)
+			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
+	}
+
+	val = (u64) rxr->rx_desc_mapping[0] >> 32;
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+
+	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
+	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+
+	ring_prod = prod = rxr->rx_pg_prod;
+	for (i = 0; i < bp->rx_pg_ring_size; i++) {
+		if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
+				    ring_num, i, bp->rx_pg_ring_size);
+			break;
+		}
+		prod = NEXT_RX_BD(prod);
+		ring_prod = RX_PG_RING_IDX(prod);
+	}
+	rxr->rx_pg_prod = prod;
+
+	ring_prod = prod = rxr->rx_prod;
+	for (i = 0; i < bp->rx_ring_size; i++) {
+		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
+				    ring_num, i, bp->rx_ring_size);
+			break;
+		}
+		prod = NEXT_RX_BD(prod);
+		ring_prod = RX_RING_IDX(prod);
+	}
+	rxr->rx_prod = prod;
+
+	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
+	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
+	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
+
+	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
+	REG_WR16(bp, rxr->rx_bidx_addr, prod);
+
+	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
+}
+
+static void
+bnx2_init_all_rings(struct bnx2 *bp)
+{
+	int i;
+	u32 val;
+
+	bnx2_clear_ring_states(bp);
+
+	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
+	for (i = 0; i < bp->num_tx_rings; i++)
+		bnx2_init_tx_ring(bp, i);
+
+	if (bp->num_tx_rings > 1)
+		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
+		       (TX_TSS_CID << 7));
+
+	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
+	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
+
+	for (i = 0; i < bp->num_rx_rings; i++)
+		bnx2_init_rx_ring(bp, i);
+
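+	/* Build the RSS indirection table: four bits per entry, eight
+	 * entries packed per 32-bit write.
+	 */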
+	if (bp->num_rx_rings > 1) {
+		u32 tbl_32 = 0;
+
+		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
+			int shift = (i % 8) << 2;
+
+			tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
+			if ((i % 8) == 7) {
+				REG_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
+				REG_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
+					BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
+					BNX2_RLUP_RSS_COMMAND_WRITE |
+					BNX2_RLUP_RSS_COMMAND_HASH_MASK);
+				tbl_32 = 0;
+			}
+		}
+
+		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
+		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
+
+		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
+	}
+}
+
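+/* Return the number of ring pages needed for @ring_size entries,
+ * rounded up to the next power of two; @max_size is the largest
+ * supported ring count.
+ */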
+static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
+{
+	u32 max, num_rings = 1;
+
+	while (ring_size > MAX_RX_DESC_CNT) {
+		ring_size -= MAX_RX_DESC_CNT;
+		num_rings++;
+	}
+	/* round to next power of 2 */
+	max = max_size;
+	while ((max & num_rings) == 0)
+		max >>= 1;
+
+	if (num_rings != max)
+		max <<= 1;
+
+	return max;
+}
+
+static void
+bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
+{
+	u32 rx_size, rx_space, jumbo_size;
+
+	/* 8 for CRC and VLAN */
+	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
+
+	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
+		sizeof(struct skb_shared_info);
+
+	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
+	bp->rx_pg_ring_size = 0;
+	bp->rx_max_pg_ring = 0;
+	bp->rx_max_pg_ring_idx = 0;
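+	/* If a full rx buffer no longer fits in a single page, enable the
+	 * page ring: the skb then holds only the first rx_jumbo_thresh
+	 * bytes and the remainder of the frame is placed in pages.
+	 */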
+	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
+		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
+
+		jumbo_size = size * pages;
+		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
+			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
+
+		bp->rx_pg_ring_size = jumbo_size;
+		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
+							MAX_RX_PG_RINGS);
+		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
+		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
+		bp->rx_copy_thresh = 0;
+	}
+
+	bp->rx_buf_use_size = rx_size;
+	/* hw alignment */
+	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
+	bp->rx_ring_size = size;
+	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
+	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
+}
+
+static void
+bnx2_free_tx_skbs(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_tx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+		int j;
+
+		if (txr->tx_buf_ring == NULL)
+			continue;
+
+		for (j = 0; j < TX_DESC_CNT; ) {
+			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
+			struct sk_buff *skb = tx_buf->skb;
+			int k, last;
+
+			if (skb == NULL) {
+				j++;
+				continue;
+			}
+
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(tx_buf, mapping),
+					 skb_headlen(skb),
+					 PCI_DMA_TODEVICE);
+
+			tx_buf->skb = NULL;
+
+			last = tx_buf->nr_frags;
+			j++;
+			for (k = 0; k < last; k++, j++) {
+				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
+				dma_unmap_page(&bp->pdev->dev,
+					dma_unmap_addr(tx_buf, mapping),
+					skb_shinfo(skb)->frags[k].size,
+					PCI_DMA_TODEVICE);
+			}
+			dev_kfree_skb(skb);
+		}
+	}
+}
+
+static void
+bnx2_free_rx_skbs(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->num_rx_rings; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+		int j;
+
+		if (rxr->rx_buf_ring == NULL)
+			return;
+
+		for (j = 0; j < bp->rx_max_ring_idx; j++) {
+			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
+			struct sk_buff *skb = rx_buf->skb;
+
+			if (skb == NULL)
+				continue;
+
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_use_size,
+					 PCI_DMA_FROMDEVICE);
+
+			rx_buf->skb = NULL;
+
+			dev_kfree_skb(skb);
+		}
+		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
+			bnx2_free_rx_page(bp, rxr, j);
+	}
+}
+
+static void
+bnx2_free_skbs(struct bnx2 *bp)
+{
+	bnx2_free_tx_skbs(bp);
+	bnx2_free_rx_skbs(bp);
+}
+
+static int
+bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
+{
+	int rc;
+
+	rc = bnx2_reset_chip(bp, reset_code);
+	bnx2_free_skbs(bp);
+	if (rc)
+		return rc;
+
+	if ((rc = bnx2_init_chip(bp)) != 0)
+		return rc;
+
+	bnx2_init_all_rings(bp);
+	return 0;
+}
+
+static int
+bnx2_init_nic(struct bnx2 *bp, int reset_phy)
+{
+	int rc;
+
+	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
+		return rc;
+
+	spin_lock_bh(&bp->phy_lock);
+	bnx2_init_phy(bp, reset_phy);
+	bnx2_set_link(bp);
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		bnx2_remote_phy_event(bp);
+	spin_unlock_bh(&bp->phy_lock);
+	return 0;
+}
+
+static int
+bnx2_shutdown_chip(struct bnx2 *bp)
+{
+	u32 reset_code;
+
+	if (bp->flags & BNX2_FLAG_NO_WOL)
+		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
+	else if (bp->wol)
+		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
+	else
+		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
+
+	return bnx2_reset_chip(bp, reset_code);
+}
+
+static int
+bnx2_test_registers(struct bnx2 *bp)
+{
+	int ret;
+	int i, is_5709;
+	static const struct {
+		u16   offset;
+		u16   flags;
+#define BNX2_FL_NOT_5709	1
+		u32   rw_mask;
+		u32   ro_mask;
+	} reg_tbl[] = {
+		{ 0x006c, 0, 0x00000000, 0x0000003f },
+		{ 0x0090, 0, 0xffffffff, 0x00000000 },
+		{ 0x0094, 0, 0x00000000, 0x00000000 },
+
+		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
+		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
+		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
+		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
+		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
+		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+
+		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
+		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
+
+		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
+		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
+		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
+
+		{ 0x1000, 0, 0x00000000, 0x00000001 },
+		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
+
+		{ 0x1408, 0, 0x01c00800, 0x00000000 },
+		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
+		{ 0x14a8, 0, 0x00000000, 0x000001ff },
+		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
+		{ 0x14b0, 0, 0x00000002, 0x00000001 },
+		{ 0x14b8, 0, 0x00000000, 0x00000000 },
+		{ 0x14c0, 0, 0x00000000, 0x00000009 },
+		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
+		{ 0x14cc, 0, 0x00000000, 0x00000001 },
+		{ 0x14d0, 0, 0xffffffff, 0x00000000 },
+
+		{ 0x1800, 0, 0x00000000, 0x00000001 },
+		{ 0x1804, 0, 0x00000000, 0x00000003 },
+
+		{ 0x2800, 0, 0x00000000, 0x00000001 },
+		{ 0x2804, 0, 0x00000000, 0x00003f01 },
+		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
+		{ 0x2810, 0, 0xffff0000, 0x00000000 },
+		{ 0x2814, 0, 0xffff0000, 0x00000000 },
+		{ 0x2818, 0, 0xffff0000, 0x00000000 },
+		{ 0x281c, 0, 0xffff0000, 0x00000000 },
+		{ 0x2834, 0, 0xffffffff, 0x00000000 },
+		{ 0x2840, 0, 0x00000000, 0xffffffff },
+		{ 0x2844, 0, 0x00000000, 0xffffffff },
+		{ 0x2848, 0, 0xffffffff, 0x00000000 },
+		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },
+
+		{ 0x2c00, 0, 0x00000000, 0x00000011 },
+		{ 0x2c04, 0, 0x00000000, 0x00030007 },
+
+		{ 0x3c00, 0, 0x00000000, 0x00000001 },
+		{ 0x3c04, 0, 0x00000000, 0x00070000 },
+		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
+		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
+		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
+		{ 0x3c14, 0, 0x00000000, 0xffffffff },
+		{ 0x3c18, 0, 0x00000000, 0xffffffff },
+		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
+		{ 0x3c20, 0, 0xffffff00, 0x00000000 },
+
+		{ 0x5004, 0, 0x00000000, 0x0000007f },
+		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
+
+		{ 0x5c00, 0, 0x00000000, 0x00000001 },
+		{ 0x5c04, 0, 0x00000000, 0x0003000f },
+		{ 0x5c08, 0, 0x00000003, 0x00000000 },
+		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
+		{ 0x5c10, 0, 0x00000000, 0xffffffff },
+		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
+		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
+		{ 0x5c88, 0, 0x00000000, 0x00077373 },
+		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },
+
+		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
+		{ 0x680c, 0, 0xffffffff, 0x00000000 },
+		{ 0x6810, 0, 0xffffffff, 0x00000000 },
+		{ 0x6814, 0, 0xffffffff, 0x00000000 },
+		{ 0x6818, 0, 0xffffffff, 0x00000000 },
+		{ 0x681c, 0, 0xffffffff, 0x00000000 },
+		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
+		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
+		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
+		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
+		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
+		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
+		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
+		{ 0x684c, 0, 0xffffffff, 0x00000000 },
+		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
+		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
+		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },
+
+		{ 0xffff, 0, 0x00000000, 0x00000000 },
+	};
+
+	ret = 0;
+	is_5709 = 0;
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		is_5709 = 1;
+
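+	/* For each register, write all-zeros and then all-ones and check
+	 * that the read/write bits (rw_mask) follow what was written while
+	 * the read-only bits (ro_mask) keep their saved value.
+	 */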
+	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+		u32 offset, rw_mask, ro_mask, save_val, val;
+		u16 flags = reg_tbl[i].flags;
+
+		if (is_5709 && (flags & BNX2_FL_NOT_5709))
+			continue;
+
+		offset = (u32) reg_tbl[i].offset;
+		rw_mask = reg_tbl[i].rw_mask;
+		ro_mask = reg_tbl[i].ro_mask;
+
+		save_val = readl(bp->regview + offset);
+
+		writel(0, bp->regview + offset);
+
+		val = readl(bp->regview + offset);
+		if ((val & rw_mask) != 0) {
+			goto reg_test_err;
+		}
+
+		if ((val & ro_mask) != (save_val & ro_mask)) {
+			goto reg_test_err;
+		}
+
+		writel(0xffffffff, bp->regview + offset);
+
+		val = readl(bp->regview + offset);
+		if ((val & rw_mask) != rw_mask) {
+			goto reg_test_err;
+		}
+
+		if ((val & ro_mask) != (save_val & ro_mask)) {
+			goto reg_test_err;
+		}
+
+		writel(save_val, bp->regview + offset);
+		continue;
+
+reg_test_err:
+		writel(save_val, bp->regview + offset);
+		ret = -ENODEV;
+		break;
+	}
+	return ret;
+}
+
+static int
+bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
+{
+	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
+		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
+	int i;
+
+	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
+		u32 offset;
+
+		for (offset = 0; offset < size; offset += 4) {
+
+			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
+
+			if (bnx2_reg_rd_ind(bp, start + offset) !=
+				test_pattern[i]) {
+				return -ENODEV;
+			}
+		}
+	}
+	return 0;
+}
+
+static int
+bnx2_test_memory(struct bnx2 *bp)
+{
+	int ret = 0;
+	int i;
+	static struct mem_entry {
+		u32   offset;
+		u32   len;
+	} mem_tbl_5706[] = {
+		{ 0x60000,  0x4000 },
+		{ 0xa0000,  0x3000 },
+		{ 0xe0000,  0x4000 },
+		{ 0x120000, 0x4000 },
+		{ 0x1a0000, 0x4000 },
+		{ 0x160000, 0x4000 },
+		{ 0xffffffff, 0    },
+	},
+	mem_tbl_5709[] = {
+		{ 0x60000,  0x4000 },
+		{ 0xa0000,  0x3000 },
+		{ 0xe0000,  0x4000 },
+		{ 0x120000, 0x4000 },
+		{ 0x1a0000, 0x4000 },
+		{ 0xffffffff, 0    },
+	};
+	struct mem_entry *mem_tbl;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		mem_tbl = mem_tbl_5709;
+	else
+		mem_tbl = mem_tbl_5706;
+
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
+			mem_tbl[i].len)) != 0) {
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+#define BNX2_MAC_LOOPBACK	0
+#define BNX2_PHY_LOOPBACK	1
+
+static int
+bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
+{
+	unsigned int pkt_size, num_pkts, i;
+	struct sk_buff *skb, *rx_skb;
+	unsigned char *packet;
+	u16 rx_start_idx, rx_idx;
+	dma_addr_t map;
+	struct tx_bd *txbd;
+	struct sw_bd *rx_buf;
+	struct l2_fhdr *rx_hdr;
+	int ret = -ENODEV;
+	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
+	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
+	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
+
+	tx_napi = bnapi;
+
+	txr = &tx_napi->tx_ring;
+	rxr = &bnapi->rx_ring;
+	if (loopback_mode == BNX2_MAC_LOOPBACK) {
+		bp->loopback = MAC_LOOPBACK;
+		bnx2_set_mac_loopback(bp);
+	} else if (loopback_mode == BNX2_PHY_LOOPBACK) {
+		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+			return 0;
+
+		bp->loopback = PHY_LOOPBACK;
+		bnx2_set_phy_loopback(bp);
+	} else
+		return -EINVAL;
+
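+	/* Build a test frame addressed to our own MAC with a zeroed
+	 * source/type and a byte-counter payload so the received copy
+	 * can be verified byte for byte.
+	 */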
+	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
+	skb = netdev_alloc_skb(bp->dev, pkt_size);
+	if (!skb)
+		return -ENOMEM;
+	packet = skb_put(skb, pkt_size);
+	memcpy(packet, bp->dev->dev_addr, 6);
+	memset(packet + 6, 0x0, 8);
+	for (i = 14; i < pkt_size; i++)
+		packet[i] = (unsigned char) (i & 0xff);
+
+	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+			     PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, map)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	REG_WR(bp, BNX2_HC_COMMAND,
+	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+
+	REG_RD(bp, BNX2_HC_COMMAND);
+
+	udelay(5);
+	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
+
+	num_pkts = 0;
+
+	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
+
+	txbd->tx_bd_haddr_hi = (u64) map >> 32;
+	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
+	txbd->tx_bd_mss_nbytes = pkt_size;
+	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
+
+	num_pkts++;
+	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
+	txr->tx_prod_bseq += pkt_size;
+
+	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
+	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+
+	udelay(100);
+
+	REG_WR(bp, BNX2_HC_COMMAND,
+	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+
+	REG_RD(bp, BNX2_HC_COMMAND);
+
+	udelay(5);
+
+	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
+	dev_kfree_skb(skb);
+
+	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
+		goto loopback_test_done;
+
+	rx_idx = bnx2_get_hw_rx_cons(bnapi);
+	if (rx_idx != rx_start_idx + num_pkts) {
+		goto loopback_test_done;
+	}
+
+	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
+	rx_skb = rx_buf->skb;
+
+	rx_hdr = rx_buf->desc;
+	skb_reserve(rx_skb, BNX2_RX_OFFSET);
+
+	dma_sync_single_for_cpu(&bp->pdev->dev,
+		dma_unmap_addr(rx_buf, mapping),
+		bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+
+	if (rx_hdr->l2_fhdr_status &
+		(L2_FHDR_ERRORS_BAD_CRC |
+		L2_FHDR_ERRORS_PHY_DECODE |
+		L2_FHDR_ERRORS_ALIGNMENT |
+		L2_FHDR_ERRORS_TOO_SHORT |
+		L2_FHDR_ERRORS_GIANT_FRAME)) {
+		goto loopback_test_done;
+	}
+
+	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
+		goto loopback_test_done;
+	}
+
+	for (i = 14; i < pkt_size; i++) {
+		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+			goto loopback_test_done;
+		}
+	}
+
+	ret = 0;
+
+loopback_test_done:
+	bp->loopback = 0;
+	return ret;
+}
+
+#define BNX2_MAC_LOOPBACK_FAILED	1
+#define BNX2_PHY_LOOPBACK_FAILED	2
+#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
+					 BNX2_PHY_LOOPBACK_FAILED)
+
+static int
+bnx2_test_loopback(struct bnx2 *bp)
+{
+	int rc = 0;
+
+	if (!netif_running(bp->dev))
+		return BNX2_LOOPBACK_FAILED;
+
+	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+	spin_lock_bh(&bp->phy_lock);
+	bnx2_init_phy(bp, 1);
+	spin_unlock_bh(&bp->phy_lock);
+	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
+		rc |= BNX2_MAC_LOOPBACK_FAILED;
+	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
+		rc |= BNX2_PHY_LOOPBACK_FAILED;
+	return rc;
+}
+
+#define NVRAM_SIZE 0x200
+#define CRC32_RESIDUAL 0xdebb20e3
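+/* CRC32 computed over a block that includes its own trailing CRC comes
+ * out to a fixed residual, so each 0x100-byte half of the NVRAM block
+ * can be verified without locating the checksum itself.
+ */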
+
+static int
+bnx2_test_nvram(struct bnx2 *bp)
+{
+	__be32 buf[NVRAM_SIZE / 4];
+	u8 *data = (u8 *) buf;
+	int rc = 0;
+	u32 magic, csum;
+
+	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
+		goto test_nvram_done;
+
+	magic = be32_to_cpu(buf[0]);
+	if (magic != 0x669955aa) {
+		rc = -ENODEV;
+		goto test_nvram_done;
+	}
+
+	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
+		goto test_nvram_done;
+
+	csum = ether_crc_le(0x100, data);
+	if (csum != CRC32_RESIDUAL) {
+		rc = -ENODEV;
+		goto test_nvram_done;
+	}
+
+	csum = ether_crc_le(0x100, data + 0x100);
+	if (csum != CRC32_RESIDUAL) {
+		rc = -ENODEV;
+	}
+
+test_nvram_done:
+	return rc;
+}
+
+static int
+bnx2_test_link(struct bnx2 *bp)
+{
+	u32 bmsr;
+
+	if (!netif_running(bp->dev))
+		return -ENODEV;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		if (bp->link_up)
+			return 0;
+		return -ENODEV;
+	}
+	spin_lock_bh(&bp->phy_lock);
+	bnx2_enable_bmsr1(bp);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
+	bnx2_disable_bmsr1(bp);
+	spin_unlock_bh(&bp->phy_lock);
+
+	if (bmsr & BMSR_LSTATUS) {
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static int
+bnx2_test_intr(struct bnx2 *bp)
+{
+	int i;
+	u16 status_idx;
+
+	if (!netif_running(bp->dev))
+		return -ENODEV;
+
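+	/* Snapshot the status-block index, force an interrupt with
+	 * COAL_NOW, then poll up to ~100ms for the index to change,
+	 * which happens when the interrupt is actually serviced.
+	 */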
+	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
+
+	/* This register is not touched during run-time. */
+	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
+	REG_RD(bp, BNX2_HC_COMMAND);
+
+	for (i = 0; i < 10; i++) {
+		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
+			status_idx) {
+			break;
+		}
+
+		msleep_interruptible(10);
+	}
+	if (i < 10)
+		return 0;
+
+	return -ENODEV;
+}
+
+/* Determining link for parallel detection. */
+static int
+bnx2_5706_serdes_has_link(struct bnx2 *bp)
+{
+	u32 mode_ctl, an_dbg, exp;
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
+		return 0;
+
+	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
+	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
+
+	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
+		return 0;
+
+	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
+
+	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
+		return 0;
+
+	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
+	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
+	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
+
+	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
+		return 0;
+
+	return 1;
+}
+
+static void
+bnx2_5706_serdes_timer(struct bnx2 *bp)
+{
+	int check_link = 1;
+
+	spin_lock(&bp->phy_lock);
+	if (bp->serdes_an_pending) {
+		bp->serdes_an_pending--;
+		check_link = 0;
+	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+		u32 bmcr;
+
+		bp->current_interval = BNX2_TIMER_INTERVAL;
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+
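+		/* Autoneg has not completed; if parallel detection sees a
+		 * link partner, force 1G full duplex and remember that we
+		 * did so, so autoneg can be re-enabled later.
+		 */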
+		if (bmcr & BMCR_ANENABLE) {
+			if (bnx2_5706_serdes_has_link(bp)) {
+				bmcr &= ~BMCR_ANENABLE;
+				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
+			}
+		}
+	} else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
+		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
+		u32 phy2;
+
+		bnx2_write_phy(bp, 0x17, 0x0f01);
+		bnx2_read_phy(bp, 0x15, &phy2);
+		if (phy2 & 0x20) {
+			u32 bmcr;
+
+			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+			bmcr |= BMCR_ANENABLE;
+			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
+
+			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
+		}
+	} else
+		bp->current_interval = BNX2_TIMER_INTERVAL;
+
+	if (check_link) {
+		u32 val;
+
+		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
+		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
+
+		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
+			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
+				bnx2_5706s_force_link_dn(bp, 1);
+				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
+			} else
+				bnx2_set_link(bp);
+		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
+			bnx2_set_link(bp);
+	}
+	spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_5708_serdes_timer(struct bnx2 *bp)
+{
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+		return;
+
+	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
+		bp->serdes_an_pending = 0;
+		return;
+	}
+
+	spin_lock(&bp->phy_lock);
+	if (bp->serdes_an_pending)
+		bp->serdes_an_pending--;
+	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
+		u32 bmcr;
+
+		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+		if (bmcr & BMCR_ANENABLE) {
+			bnx2_enable_forced_2g5(bp);
+			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
+		} else {
+			bnx2_disable_forced_2g5(bp);
+			bp->serdes_an_pending = 2;
+			bp->current_interval = BNX2_TIMER_INTERVAL;
+		}
+	} else
+		bp->current_interval = BNX2_TIMER_INTERVAL;
+
+	spin_unlock(&bp->phy_lock);
+}
+
+static void
+bnx2_timer(unsigned long data)
+{
+	struct bnx2 *bp = (struct bnx2 *) data;
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (atomic_read(&bp->intr_sem) != 0)
+		goto bnx2_restart_timer;
+
+	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+	     BNX2_FLAG_USING_MSI)
+		bnx2_chk_missed_msi(bp);
+
+	bnx2_send_heart_beat(bp);
+
+	bp->stats_blk->stat_FwRxDrop =
+		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
+
+	/* work around occasionally corrupted counters */
+	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
+		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
+					    BNX2_HC_COMMAND_STATS_NOW);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		if (CHIP_NUM(bp) == CHIP_NUM_5706)
+			bnx2_5706_serdes_timer(bp);
+		else
+			bnx2_5708_serdes_timer(bp);
+	}
+
+bnx2_restart_timer:
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+static int
+bnx2_request_irq(struct bnx2 *bp)
+{
+	unsigned long flags;
+	struct bnx2_irq *irq;
+	int rc = 0, i;
+
+	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
+		flags = 0;
+	else
+		flags = IRQF_SHARED;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		irq = &bp->irq_tbl[i];
+		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
+				 &bp->bnx2_napi[i]);
+		if (rc)
+			break;
+		irq->requested = 1;
+	}
+	return rc;
+}
+
+static void
+__bnx2_free_irq(struct bnx2 *bp)
+{
+	struct bnx2_irq *irq;
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		irq = &bp->irq_tbl[i];
+		if (irq->requested)
+			free_irq(irq->vector, &bp->bnx2_napi[i]);
+		irq->requested = 0;
+	}
+}
+
+static void
+bnx2_free_irq(struct bnx2 *bp)
+{
+	__bnx2_free_irq(bp);
+	if (bp->flags & BNX2_FLAG_USING_MSI)
+		pci_disable_msi(bp->pdev);
+	else if (bp->flags & BNX2_FLAG_USING_MSIX)
+		pci_disable_msix(bp->pdev);
+
+	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
+}
+
+static void
+bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
+{
+	int i, total_vecs, rc;
+	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
+	struct net_device *dev = bp->dev;
+	const int len = sizeof(bp->irq_tbl[0].name);
+
+	bnx2_setup_msix_tbl(bp);
+	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
+	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
+	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+
+	/* Need to flush the previous three writes to ensure MSI-X
+	 * is set up properly.
+	 */
+	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
+	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+		msix_ent[i].entry = i;
+		msix_ent[i].vector = 0;
+	}
+
+	total_vecs = msix_vecs;
+#ifdef BCM_CNIC
+	total_vecs++;
+#endif
+	rc = -ENOSPC;
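+	/* pci_enable_msix() returns 0 on success or, when positive, the
+	 * number of vectors actually available; retry with that smaller
+	 * count until it succeeds or falls below the minimum.
+	 */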
+	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
+		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
+		if (rc <= 0)
+			break;
+		total_vecs = rc;
+	}
+
+	if (rc != 0)
+		return;
+
+	msix_vecs = total_vecs;
+#ifdef BCM_CNIC
+	msix_vecs--;
+#endif
+	bp->irq_nvecs = msix_vecs;
+	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
+	for (i = 0; i < total_vecs; i++) {
+		bp->irq_tbl[i].vector = msix_ent[i].vector;
+		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
+		bp->irq_tbl[i].handler = bnx2_msi_1shot;
+	}
+}
+
+static int
+bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
+{
+	int cpus = num_online_cpus();
+	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
+
+	bp->irq_tbl[0].handler = bnx2_interrupt;
+	strcpy(bp->irq_tbl[0].name, bp->dev->name);
+	bp->irq_nvecs = 1;
+	bp->irq_tbl[0].vector = bp->pdev->irq;
+
+	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
+		bnx2_enable_msix(bp, msix_vecs);
+
+	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
+	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
+		if (pci_enable_msi(bp->pdev) == 0) {
+			bp->flags |= BNX2_FLAG_USING_MSI;
+			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
+				bp->irq_tbl[0].handler = bnx2_msi_1shot;
+			} else
+				bp->irq_tbl[0].handler = bnx2_msi;
+
+			bp->irq_tbl[0].vector = bp->pdev->irq;
+		}
+	}
+
+	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
+	netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
+
+	bp->num_rx_rings = bp->irq_nvecs;
+	return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_open(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	netif_carrier_off(dev);
+
+	bnx2_set_power_state(bp, PCI_D0);
+	bnx2_disable_int(bp);
+
+	rc = bnx2_setup_int_mode(bp, disable_msi);
+	if (rc)
+		goto open_err;
+	bnx2_init_napi(bp);
+	bnx2_napi_enable(bp);
+	rc = bnx2_alloc_mem(bp);
+	if (rc)
+		goto open_err;
+
+	rc = bnx2_request_irq(bp);
+	if (rc)
+		goto open_err;
+
+	rc = bnx2_init_nic(bp, 1);
+	if (rc)
+		goto open_err;
+
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+	atomic_set(&bp->intr_sem, 0);
+
+	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
+	bnx2_enable_int(bp);
+
+	if (bp->flags & BNX2_FLAG_USING_MSI) {
+		/* Test MSI to make sure it is working
+		 * If MSI test fails, go back to INTx mode
+		 */
+		if (bnx2_test_intr(bp) != 0) {
+			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
+
+			bnx2_disable_int(bp);
+			bnx2_free_irq(bp);
+
+			bnx2_setup_int_mode(bp, 1);
+
+			rc = bnx2_init_nic(bp, 0);
+
+			if (!rc)
+				rc = bnx2_request_irq(bp);
+
+			if (rc) {
+				del_timer_sync(&bp->timer);
+				goto open_err;
+			}
+			bnx2_enable_int(bp);
+		}
+	}
+	if (bp->flags & BNX2_FLAG_USING_MSI)
+		netdev_info(dev, "using MSI\n");
+	else if (bp->flags & BNX2_FLAG_USING_MSIX)
+		netdev_info(dev, "using MSIX\n");
+
+	netif_tx_start_all_queues(dev);
+
+	return 0;
+
+open_err:
+	bnx2_napi_disable(bp);
+	bnx2_free_skbs(bp);
+	bnx2_free_irq(bp);
+	bnx2_free_mem(bp);
+	bnx2_del_napi(bp);
+	return rc;
+}
+
+static void
+bnx2_reset_task(struct work_struct *work)
+{
+	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
+	int rc;
+
+	rtnl_lock();
+	if (!netif_running(bp->dev)) {
+		rtnl_unlock();
+		return;
+	}
+
+	bnx2_netif_stop(bp, true);
+
+	rc = bnx2_init_nic(bp, 1);
+	if (rc) {
+		netdev_err(bp->dev, "failed to reset NIC, closing\n");
+		bnx2_napi_enable(bp);
+		dev_close(bp->dev);
+		rtnl_unlock();
+		return;
+	}
+
+	atomic_set(&bp->intr_sem, 1);
+	bnx2_netif_start(bp, true);
+	rtnl_unlock();
+}
+
+static void
+bnx2_dump_state(struct bnx2 *bp)
+{
+	struct net_device *dev = bp->dev;
+	u32 val1, val2;
+
+	pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
+	netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
+		   atomic_read(&bp->intr_sem), val1);
+	pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
+	pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
+	netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
+	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
+		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
+		   REG_RD(bp, BNX2_EMAC_RX_STATUS));
+	netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
+		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+	if (bp->flags & BNX2_FLAG_USING_MSIX)
+		netdev_err(dev, "DEBUG: PBA[%08x]\n",
+			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
+}
+
+static void
+bnx2_tx_timeout(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bnx2_dump_state(bp);
+	bnx2_dump_mcp_state(bp);
+
+	/* This allows the netif to be shut down gracefully before resetting */
+	schedule_work(&bp->reset_task);
+}
+
+/* Called with netif_tx_lock.
+ * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue().
+ */
+static netdev_tx_t
+bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	dma_addr_t mapping;
+	struct tx_bd *txbd;
+	struct sw_tx_bd *tx_buf;
+	u32 len, vlan_tag_flags, last_frag, mss;
+	u16 prod, ring_prod;
+	int i;
+	struct bnx2_napi *bnapi;
+	struct bnx2_tx_ring_info *txr;
+	struct netdev_queue *txq;
+
+	/*  Determine which tx ring we will be placed on */
+	i = skb_get_queue_mapping(skb);
+	bnapi = &bp->bnx2_napi[i];
+	txr = &bnapi->tx_ring;
+	txq = netdev_get_tx_queue(dev, i);
+
+	if (unlikely(bnx2_tx_avail(bp, txr) <
+	    (skb_shinfo(skb)->nr_frags + 1))) {
+		netif_tx_stop_queue(txq);
+		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
+
+		return NETDEV_TX_BUSY;
+	}
+	len = skb_headlen(skb);
+	prod = txr->tx_prod;
+	ring_prod = TX_RING_IDX(prod);
+
+	vlan_tag_flags = 0;
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
+	}
+
+	if (vlan_tx_tag_present(skb)) {
+		vlan_tag_flags |=
+			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
+	}
+
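+	/* For LSO the hardware needs the TCP options length and, for
+	 * IPv6, the transport header offset encoded into the BD flag
+	 * and MSS fields, split across several bit ranges.
+	 */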
+	if ((mss = skb_shinfo(skb)->gso_size)) {
+		u32 tcp_opt_len;
+		struct iphdr *iph;
+
+		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
+
+		tcp_opt_len = tcp_optlen(skb);
+
+		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
+			u32 tcp_off = skb_transport_offset(skb) -
+				      sizeof(struct ipv6hdr) - ETH_HLEN;
+
+			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
+					  TX_BD_FLAGS_SW_FLAGS;
+			if (likely(tcp_off == 0))
+				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
+			else {
+				tcp_off >>= 3;
+				vlan_tag_flags |= ((tcp_off & 0x3) <<
+						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
+						  ((tcp_off & 0x10) <<
+						   TX_BD_FLAGS_TCP6_OFF4_SHL);
+				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
+			}
+		} else {
+			iph = ip_hdr(skb);
+			if (tcp_opt_len || (iph->ihl > 5)) {
+				vlan_tag_flags |= ((iph->ihl - 5) +
+						   (tcp_opt_len >> 2)) << 8;
+			}
+		}
+	} else
+		mss = 0;
+
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = skb;
+	dma_unmap_addr_set(tx_buf, mapping, mapping);
+
+	txbd = &txr->tx_desc_ring[ring_prod];
+
+	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
+	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+	txbd->tx_bd_mss_nbytes = len | (mss << 16);
+	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
+
+	last_frag = skb_shinfo(skb)->nr_frags;
+	tx_buf->nr_frags = last_frag;
+	tx_buf->is_gso = skb_is_gso(skb);
+
+	for (i = 0; i < last_frag; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		prod = NEXT_TX_BD(prod);
+		ring_prod = TX_RING_IDX(prod);
+		txbd = &txr->tx_desc_ring[ring_prod];
+
+		len = frag->size;
+		mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
+				       len, PCI_DMA_TODEVICE);
+		if (dma_mapping_error(&bp->pdev->dev, mapping))
+			goto dma_error;
+		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
+				   mapping);
+
+		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
+		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
+		txbd->tx_bd_mss_nbytes = len | (mss << 16);
+		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
+	}
+	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+
+	prod = NEXT_TX_BD(prod);
+	txr->tx_prod_bseq += skb->len;
+
+	REG_WR16(bp, txr->tx_bidx_addr, prod);
+	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
+
+	mmiowb();
+
+	txr->tx_prod = prod;
+
+	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in bnx2_tx_avail() below, because in
+		 * bnx2_tx_int(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
+			netif_tx_wake_queue(txq);
+	}
+
+	return NETDEV_TX_OK;
+dma_error:
+	/* save value of frag that failed */
+	last_frag = i;
+
+	/* start back at beginning and unmap skb */
+	prod = txr->tx_prod;
+	ring_prod = TX_RING_IDX(prod);
+	tx_buf = &txr->tx_buf_ring[ring_prod];
+	tx_buf->skb = NULL;
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			 skb_headlen(skb), PCI_DMA_TODEVICE);
+
+	/* unmap remaining mapped pages */
+	for (i = 0; i < last_frag; i++) {
+		prod = NEXT_TX_BD(prod);
+		ring_prod = TX_RING_IDX(prod);
+		tx_buf = &txr->tx_buf_ring[ring_prod];
+		dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
+			       skb_shinfo(skb)->frags[i].size,
+			       PCI_DMA_TODEVICE);
+	}
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_close(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bnx2_disable_int_sync(bp);
+	bnx2_napi_disable(bp);
+	del_timer_sync(&bp->timer);
+	bnx2_shutdown_chip(bp);
+	bnx2_free_irq(bp);
+	bnx2_free_skbs(bp);
+	bnx2_free_mem(bp);
+	bnx2_del_napi(bp);
+	bp->link_up = 0;
+	netif_carrier_off(bp->dev);
+	bnx2_set_power_state(bp, PCI_D3hot);
+	return 0;
+}
+
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+	u32 *hw_stats = (u32 *) bp->stats_blk;
+	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+	int i;
+
+	/* The first 10 counters are 64-bit, stored as hi/lo pairs of 32-bit words */
+	for (i = 0; i < 20; i += 2) {
+		u32 hi;
+		u64 lo;
+
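+		/* Carry from the low 32-bit word into the high word when
+		 * the summed low words overflow.
+		 */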
+		hi = temp_stats[i] + hw_stats[i];
+		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
+		if (lo > 0xffffffff)
+			hi++;
+		temp_stats[i] = hi;
+		temp_stats[i + 1] = lo & 0xffffffff;
+	}
+
+	for ( ; i < sizeof(struct statistics_block) / 4; i++)
+		temp_stats[i] += hw_stats[i];
+}
+
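+/* Combine the hi/lo counter words from both the live statistics block
+ * and the block saved across the last reset.
+ */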
+#define GET_64BIT_NET_STATS64(ctr)		\
+	(((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
+
+#define GET_64BIT_NET_STATS(ctr)				\
+	GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
+	GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
+
+#define GET_32BIT_NET_STATS(ctr)				\
+	(unsigned long) (bp->stats_blk->ctr +			\
+			 bp->temp_stats_blk->ctr)
+
+static struct rtnl_link_stats64 *
+bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->stats_blk == NULL)
+		return net_stats;
+
+	net_stats->rx_packets =
+		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
+
+	net_stats->tx_packets =
+		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
+
+	net_stats->rx_bytes =
+		GET_64BIT_NET_STATS(stat_IfHCInOctets);
+
+	net_stats->tx_bytes =
+		GET_64BIT_NET_STATS(stat_IfHCOutOctets);
+
+	net_stats->multicast =
+		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts);
+
+	net_stats->collisions =
+		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
+
+	net_stats->rx_length_errors =
+		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
+
+	net_stats->rx_over_errors =
+		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
+
+	net_stats->rx_frame_errors =
+		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
+
+	net_stats->rx_crc_errors =
+		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
+
+	net_stats->rx_errors = net_stats->rx_length_errors +
+		net_stats->rx_over_errors + net_stats->rx_frame_errors +
+		net_stats->rx_crc_errors;
+
+	net_stats->tx_aborted_errors =
+		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
+
+	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
+		net_stats->tx_carrier_errors = 0;
+	else {
+		net_stats->tx_carrier_errors =
+			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
+	}
+
+	net_stats->tx_errors =
+		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
+		net_stats->tx_aborted_errors +
+		net_stats->tx_carrier_errors;
+
+	net_stats->rx_missed_errors =
+		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+		GET_32BIT_NET_STATS(stat_FwRxDrop);
+
+	return net_stats;
+}
+
+/* All ethtool functions called with rtnl_lock */
+
+static int
+bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int support_serdes = 0, support_copper = 0;
+
+	cmd->supported = SUPPORTED_Autoneg;
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		support_serdes = 1;
+		support_copper = 1;
+	} else if (bp->phy_port == PORT_FIBRE)
+		support_serdes = 1;
+	else
+		support_copper = 1;
+
+	if (support_serdes) {
+		cmd->supported |= SUPPORTED_1000baseT_Full |
+			SUPPORTED_FIBRE;
+		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
+			cmd->supported |= SUPPORTED_2500baseX_Full;
+	}
+	if (support_copper) {
+		cmd->supported |= SUPPORTED_10baseT_Half |
+			SUPPORTED_10baseT_Full |
+			SUPPORTED_100baseT_Half |
+			SUPPORTED_100baseT_Full |
+			SUPPORTED_1000baseT_Full |
+			SUPPORTED_TP;
+	}
+
+	spin_lock_bh(&bp->phy_lock);
+	cmd->port = bp->phy_port;
+	cmd->advertising = bp->advertising;
+
+	if (bp->autoneg & AUTONEG_SPEED) {
+		cmd->autoneg = AUTONEG_ENABLE;
+	} else {
+		cmd->autoneg = AUTONEG_DISABLE;
+	}
+
+	if (netif_carrier_ok(dev)) {
+		ethtool_cmd_speed_set(cmd, bp->line_speed);
+		cmd->duplex = bp->duplex;
+	} else {
+		ethtool_cmd_speed_set(cmd, -1);
+		cmd->duplex = -1;
+	}
+	spin_unlock_bh(&bp->phy_lock);
+
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->phy_address = bp->phy_addr;
+
+	return 0;
+}
+
+static int
+bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u8 autoneg = bp->autoneg;
+	u8 req_duplex = bp->req_duplex;
+	u16 req_line_speed = bp->req_line_speed;
+	u32 advertising = bp->advertising;
+	int err = -EINVAL;
+
+	spin_lock_bh(&bp->phy_lock);
+
+	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
+		goto err_out_unlock;
+
+	if (cmd->port != bp->phy_port &&
+	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
+		goto err_out_unlock;
+
+	/* If device is down, we can store the settings only if the user
+	 * is setting the currently active port.
+	 */
+	if (!netif_running(dev) && cmd->port != bp->phy_port)
+		goto err_out_unlock;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		autoneg |= AUTONEG_SPEED;
+
+		advertising = cmd->advertising;
+		if (cmd->port == PORT_TP) {
+			advertising &= ETHTOOL_ALL_COPPER_SPEED;
+			if (!advertising)
+				advertising = ETHTOOL_ALL_COPPER_SPEED;
+		} else {
+			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
+			if (!advertising)
+				advertising = ETHTOOL_ALL_FIBRE_SPEED;
+		}
+		advertising |= ADVERTISED_Autoneg;
+	} else {
+		u32 speed = ethtool_cmd_speed(cmd);
+		if (cmd->port == PORT_FIBRE) {
+			if ((speed != SPEED_1000 &&
+			     speed != SPEED_2500) ||
+			    (cmd->duplex != DUPLEX_FULL))
+				goto err_out_unlock;
+
+			if (speed == SPEED_2500 &&
+			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
+				goto err_out_unlock;
+		} else if (speed == SPEED_1000 || speed == SPEED_2500)
+			goto err_out_unlock;
+
+		autoneg &= ~AUTONEG_SPEED;
+		req_line_speed = speed;
+		req_duplex = cmd->duplex;
+		advertising = 0;
+	}
+
+	bp->autoneg = autoneg;
+	bp->advertising = advertising;
+	bp->req_line_speed = req_line_speed;
+	bp->req_duplex = req_duplex;
+
+	err = 0;
+	/* If device is down, the new settings will be picked up when it is
+	 * brought up.
+	 */
+	if (netif_running(dev))
+		err = bnx2_setup_phy(bp, cmd->port);
+
+err_out_unlock:
+	spin_unlock_bh(&bp->phy_lock);
+
+	return err;
+}
+
+static void
+bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+	strcpy(info->bus_info, pci_name(bp->pdev));
+	strcpy(info->fw_version, bp->fw_version);
+}
+
+#define BNX2_REGDUMP_LEN		(32 * 1024)
+
+static int
+bnx2_get_regs_len(struct net_device *dev)
+{
+	return BNX2_REGDUMP_LEN;
+}
+
+static void
+bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
+{
+	u32 *p = _p, i, offset;
+	u8 *orig_p = _p;
+	struct bnx2 *bp = netdev_priv(dev);
+	static const u32 reg_boundaries[] = {
+		0x0000, 0x0098, 0x0400, 0x045c,
+		0x0800, 0x0880, 0x0c00, 0x0c10,
+		0x0c30, 0x0d08, 0x1000, 0x101c,
+		0x1040, 0x1048, 0x1080, 0x10a4,
+		0x1400, 0x1490, 0x1498, 0x14f0,
+		0x1500, 0x155c, 0x1580, 0x15dc,
+		0x1600, 0x1658, 0x1680, 0x16d8,
+		0x1800, 0x1820, 0x1840, 0x1854,
+		0x1880, 0x1894, 0x1900, 0x1984,
+		0x1c00, 0x1c0c, 0x1c40, 0x1c54,
+		0x1c80, 0x1c94, 0x1d00, 0x1d84,
+		0x2000, 0x2030, 0x23c0, 0x2400,
+		0x2800, 0x2820, 0x2830, 0x2850,
+		0x2b40, 0x2c10, 0x2fc0, 0x3058,
+		0x3c00, 0x3c94, 0x4000, 0x4010,
+		0x4080, 0x4090, 0x43c0, 0x4458,
+		0x4c00, 0x4c18, 0x4c40, 0x4c54,
+		0x4fc0, 0x5010, 0x53c0, 0x5444,
+		0x5c00, 0x5c18, 0x5c80, 0x5c90,
+		0x5fc0, 0x6000, 0x6400, 0x6428,
+		0x6800, 0x6848, 0x684c, 0x6860,
+		0x6888, 0x6910, 0x8000
+	};
+
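+	/* reg_boundaries holds (start, end) pairs of readable register
+	 * ranges; read word by word inside each range, then skip to the
+	 * next start, leaving the zeroed gaps in between.
+	 */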
+	regs->version = 0;
+
+	memset(p, 0, BNX2_REGDUMP_LEN);
+
+	if (!netif_running(bp->dev))
+		return;
+
+	i = 0;
+	offset = reg_boundaries[0];
+	p += offset;
+	while (offset < BNX2_REGDUMP_LEN) {
+		*p++ = REG_RD(bp, offset);
+		offset += 4;
+		if (offset == reg_boundaries[i + 1]) {
+			offset = reg_boundaries[i + 2];
+			p = (u32 *) (orig_p + offset);
+			i += 2;
+		}
+	}
+}
+
+static void
+bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->flags & BNX2_FLAG_NO_WOL) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		wol->supported = WAKE_MAGIC;
+		if (bp->wol)
+			wol->wolopts = WAKE_MAGIC;
+		else
+			wol->wolopts = 0;
+	}
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int
+bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (bp->flags & BNX2_FLAG_NO_WOL)
+			return -EINVAL;
+
+		bp->wol = 1;
+	} else {
+		bp->wol = 0;
+	}
+	return 0;
+}
+
+static int
+bnx2_nway_reset(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	u32 bmcr;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	if (!(bp->autoneg & AUTONEG_SPEED)) {
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&bp->phy_lock);
+
+	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
+		int rc;
+
+		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
+		spin_unlock_bh(&bp->phy_lock);
+		return rc;
+	}
+
+	/* Force a link down visible on the other side */
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
+		spin_unlock_bh(&bp->phy_lock);
+
+		msleep(20);
+
+		spin_lock_bh(&bp->phy_lock);
+
+		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
+		bp->serdes_an_pending = 1;
+		mod_timer(&bp->timer, jiffies + bp->current_interval);
+	}
+
+	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
+	bmcr &= ~BMCR_LOOPBACK;
+	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
+
+	spin_unlock_bh(&bp->phy_lock);
+
+	return 0;
+}
+
+static u32
+bnx2_get_link(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	return bp->link_up;
+}
+
+static int
+bnx2_get_eeprom_len(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (bp->flash_info == NULL)
+		return 0;
+
+	return (int) bp->flash_size;
+}
+
+static int
+bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		u8 *eebuf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	/* parameters already validated in ethtool_get_eeprom */
+
+	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int
+bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		u8 *eebuf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	/* parameters already validated in ethtool_set_eeprom */
+
+	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int
+bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+	coal->rx_coalesce_usecs = bp->rx_ticks;
+	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
+	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
+	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
+
+	coal->tx_coalesce_usecs = bp->tx_ticks;
+	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
+	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
+	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
+
+	coal->stats_block_coalesce_usecs = bp->stats_ticks;
+
+	return 0;
+}
+
+static int
+bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
+	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
+
+	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
+	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
+
+	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
+	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
+
+	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
+	if (bp->rx_quick_cons_trip_int > 0xff)
+		bp->rx_quick_cons_trip_int = 0xff;
+
+	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
+	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
+
+	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
+	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
+
+	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
+	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
+
+	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
+	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
+		0xff;
+
+	bp->stats_ticks = coal->stats_block_coalesce_usecs;
+	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
+		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
+			bp->stats_ticks = USEC_PER_SEC;
+	}
+	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
+		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+
+	if (netif_running(bp->dev)) {
+		bnx2_netif_stop(bp, true);
+		bnx2_init_nic(bp, 0);
+		bnx2_netif_start(bp, true);
+	}
+
+	return 0;
+}
+
+static void
+bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
+
+	ering->rx_pending = bp->rx_ring_size;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = bp->rx_pg_ring_size;
+
+	ering->tx_max_pending = MAX_TX_DESC_CNT;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int
+bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
+{
+	if (netif_running(bp->dev)) {
+		/* Reset will erase chipset stats; save them */
+		bnx2_save_stats(bp);
+
+		bnx2_netif_stop(bp, true);
+		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
+		__bnx2_free_irq(bp);
+		bnx2_free_skbs(bp);
+		bnx2_free_mem(bp);
+	}
+
+	bnx2_set_rx_ring_size(bp, rx);
+	bp->tx_ring_size = tx;
+
+	if (netif_running(bp->dev)) {
+		int rc;
+
+		rc = bnx2_alloc_mem(bp);
+		if (!rc)
+			rc = bnx2_request_irq(bp);
+
+		if (!rc)
+			rc = bnx2_init_nic(bp, 0);
+
+		if (rc) {
+			bnx2_napi_enable(bp);
+			dev_close(bp->dev);
+			return rc;
+		}
+#ifdef BCM_CNIC
+		mutex_lock(&bp->cnic_lock);
+		/* Let cnic know about the new status block. */
+		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
+			bnx2_setup_cnic_irq_info(bp);
+		mutex_unlock(&bp->cnic_lock);
+#endif
+		bnx2_netif_start(bp, true);
+	}
+	return 0;
+}
+
+static int
+bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int rc;
+
+	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
+		(ering->tx_pending > MAX_TX_DESC_CNT) ||
+		(ering->tx_pending <= MAX_SKB_FRAGS)) {
+		return -EINVAL;
+	}
+	rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
+	return rc;
+}
+
+static void
+bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
+	epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
+	epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
+}
+
+static int
+bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bp->req_flow_ctrl = 0;
+	if (epause->rx_pause)
+		bp->req_flow_ctrl |= FLOW_CTRL_RX;
+	if (epause->tx_pause)
+		bp->req_flow_ctrl |= FLOW_CTRL_TX;
+
+	if (epause->autoneg) {
+		bp->autoneg |= AUTONEG_FLOW_CTRL;
+	}
+	else {
+		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
+	}
+
+	if (netif_running(dev)) {
+		spin_lock_bh(&bp->phy_lock);
+		bnx2_setup_phy(bp, bp->phy_port);
+		spin_unlock_bh(&bp->phy_lock);
+	}
+
+	return 0;
+}
+
+static struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2_stats_str_arr[] = {
+	{ "rx_bytes" },
+	{ "rx_error_bytes" },
+	{ "tx_bytes" },
+	{ "tx_error_bytes" },
+	{ "rx_ucast_packets" },
+	{ "rx_mcast_packets" },
+	{ "rx_bcast_packets" },
+	{ "tx_ucast_packets" },
+	{ "tx_mcast_packets" },
+	{ "tx_bcast_packets" },
+	{ "tx_mac_errors" },
+	{ "tx_carrier_errors" },
+	{ "rx_crc_errors" },
+	{ "rx_align_errors" },
+	{ "tx_single_collisions" },
+	{ "tx_multi_collisions" },
+	{ "tx_deferred" },
+	{ "tx_excess_collisions" },
+	{ "tx_late_collisions" },
+	{ "tx_total_collisions" },
+	{ "rx_fragments" },
+	{ "rx_jabbers" },
+	{ "rx_undersize_packets" },
+	{ "rx_oversize_packets" },
+	{ "rx_64_byte_packets" },
+	{ "rx_65_to_127_byte_packets" },
+	{ "rx_128_to_255_byte_packets" },
+	{ "rx_256_to_511_byte_packets" },
+	{ "rx_512_to_1023_byte_packets" },
+	{ "rx_1024_to_1522_byte_packets" },
+	{ "rx_1523_to_9022_byte_packets" },
+	{ "tx_64_byte_packets" },
+	{ "tx_65_to_127_byte_packets" },
+	{ "tx_128_to_255_byte_packets" },
+	{ "tx_256_to_511_byte_packets" },
+	{ "tx_512_to_1023_byte_packets" },
+	{ "tx_1024_to_1522_byte_packets" },
+	{ "tx_1523_to_9022_byte_packets" },
+	{ "rx_xon_frames" },
+	{ "rx_xoff_frames" },
+	{ "tx_xon_frames" },
+	{ "tx_xoff_frames" },
+	{ "rx_mac_ctrl_frames" },
+	{ "rx_filtered_packets" },
+	{ "rx_ftq_discards" },
+	{ "rx_discards" },
+	{ "rx_fw_discards" },
+};
+
+#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)
+
+#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
+
+static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
+    STATS_OFFSET32(stat_IfHCInOctets_hi),
+    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
+    STATS_OFFSET32(stat_IfHCOutOctets_hi),
+    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
+    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
+    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
+    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
+    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
+    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
+    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
+    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
+    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
+    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
+    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
+    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
+    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
+    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
+    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
+    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
+    STATS_OFFSET32(stat_EtherStatsCollisions),
+    STATS_OFFSET32(stat_EtherStatsFragments),
+    STATS_OFFSET32(stat_EtherStatsJabbers),
+    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
+    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
+    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
+    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
+    STATS_OFFSET32(stat_XonPauseFramesReceived),
+    STATS_OFFSET32(stat_XoffPauseFramesReceived),
+    STATS_OFFSET32(stat_OutXonSent),
+    STATS_OFFSET32(stat_OutXoffSent),
+    STATS_OFFSET32(stat_MacControlFramesReceived),
+    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
+    STATS_OFFSET32(stat_IfInFTQDiscards),
+    STATS_OFFSET32(stat_IfInMBUFDiscards),
+    STATS_OFFSET32(stat_FwRxDrop),
+};
+
+/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
+ * skipped because of errata.
+ */
+static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
+	8,0,8,8,8,8,8,8,8,8,
+	4,0,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,
+};
+
+static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
+	8,0,8,8,8,8,8,8,8,8,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,4,4,4,
+	4,4,4,4,4,4,4,
+};
+
+#define BNX2_NUM_TESTS 6
+
+static struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
+	{ "register_test (offline)" },
+	{ "memory_test (offline)" },
+	{ "loopback_test (offline)" },
+	{ "nvram_test (online)" },
+	{ "interrupt_test (online)" },
+	{ "link_test (online)" },
+};
+
+static int
+bnx2_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return BNX2_NUM_TESTS;
+	case ETH_SS_STATS:
+		return BNX2_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void
+bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	bnx2_set_power_state(bp, PCI_D0);
+
+	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		int i;
+
+		bnx2_netif_stop(bp, true);
+		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
+		bnx2_free_skbs(bp);
+
+		if (bnx2_test_registers(bp) != 0) {
+			buf[0] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		if (bnx2_test_memory(bp) != 0) {
+			buf[1] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+		if (!netif_running(bp->dev))
+			bnx2_shutdown_chip(bp);
+		else {
+			bnx2_init_nic(bp, 1);
+			bnx2_netif_start(bp, true);
+		}
+
+		/* wait for link up */
+		for (i = 0; i < 7; i++) {
+			if (bp->link_up)
+				break;
+			msleep_interruptible(1000);
+		}
+	}
+
+	if (bnx2_test_nvram(bp) != 0) {
+		buf[3] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	if (bnx2_test_intr(bp) != 0) {
+		buf[4] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (bnx2_test_link(bp) != 0) {
+		buf[5] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	if (!netif_running(bp->dev))
+		bnx2_set_power_state(bp, PCI_D3hot);
+}
+
+static void
+bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, bnx2_stats_str_arr,
+			sizeof(bnx2_stats_str_arr));
+		break;
+	case ETH_SS_TEST:
+		memcpy(buf, bnx2_tests_str_arr,
+			sizeof(bnx2_tests_str_arr));
+		break;
+	}
+}
+
+static void
+bnx2_get_ethtool_stats(struct net_device *dev,
+		struct ethtool_stats *stats, u64 *buf)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int i;
+	u32 *hw_stats = (u32 *) bp->stats_blk;
+	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+	u8 *stats_len_arr = NULL;
+
+	if (hw_stats == NULL) {
+		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
+		return;
+	}
+
+	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
+	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
+	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
+	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
+		stats_len_arr = bnx2_5706_stats_len_arr;
+	else
+		stats_len_arr = bnx2_5708_stats_len_arr;
+
+	for (i = 0; i < BNX2_NUM_STATS; i++) {
+		unsigned long offset;
+
+		if (stats_len_arr[i] == 0) {
+			/* skip this counter */
+			buf[i] = 0;
+			continue;
+		}
+
+		offset = bnx2_stats_offset_arr[i];
+		if (stats_len_arr[i] == 4) {
+			/* 4-byte counter */
+			buf[i] = (u64) *(hw_stats + offset) +
+				 *(temp_stats + offset);
+			continue;
+		}
+		/* 8-byte counter */
+		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+			 *(hw_stats + offset + 1) +
+			 (((u64) *(temp_stats + offset)) << 32) +
+			 *(temp_stats + offset + 1);
+	}
+}
+
+static int
+bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		bnx2_set_power_state(bp, PCI_D0);
+
+		bp->leds_save = REG_RD(bp, BNX2_MISC_CFG);
+		REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
+		return 1;	/* cycle on/off once per second */
+
+	case ETHTOOL_ID_ON:
+		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
+		       BNX2_EMAC_LED_1000MB_OVERRIDE |
+		       BNX2_EMAC_LED_100MB_OVERRIDE |
+		       BNX2_EMAC_LED_10MB_OVERRIDE |
+		       BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
+		       BNX2_EMAC_LED_TRAFFIC);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		REG_WR(bp, BNX2_EMAC_LED, 0);
+		REG_WR(bp, BNX2_MISC_CFG, bp->leds_save);
+
+		if (!netif_running(dev))
+			bnx2_set_power_state(bp, PCI_D3hot);
+		break;
+	}
+
+	return 0;
+}
+
+static u32
+bnx2_fix_features(struct net_device *dev, u32 features)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
+		features |= NETIF_F_HW_VLAN_RX;
+
+	return features;
+}
+
+static int
+bnx2_set_features(struct net_device *dev, u32 features)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	/* TSO with VLAN tag won't work with current firmware */
+	if (features & NETIF_F_HW_VLAN_TX)
+		dev->vlan_features |= (dev->hw_features & NETIF_F_ALL_TSO);
+	else
+		dev->vlan_features &= ~NETIF_F_ALL_TSO;
+
+	if ((!!(features & NETIF_F_HW_VLAN_RX) !=
+	    !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
+	    netif_running(dev)) {
+		bnx2_netif_stop(bp, false);
+		dev->features = features;
+		bnx2_set_rx_mode(dev);
+		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
+		bnx2_netif_start(bp, false);
+		return 1;
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops bnx2_ethtool_ops = {
+	.get_settings		= bnx2_get_settings,
+	.set_settings		= bnx2_set_settings,
+	.get_drvinfo		= bnx2_get_drvinfo,
+	.get_regs_len		= bnx2_get_regs_len,
+	.get_regs		= bnx2_get_regs,
+	.get_wol		= bnx2_get_wol,
+	.set_wol		= bnx2_set_wol,
+	.nway_reset		= bnx2_nway_reset,
+	.get_link		= bnx2_get_link,
+	.get_eeprom_len		= bnx2_get_eeprom_len,
+	.get_eeprom		= bnx2_get_eeprom,
+	.set_eeprom		= bnx2_set_eeprom,
+	.get_coalesce		= bnx2_get_coalesce,
+	.set_coalesce		= bnx2_set_coalesce,
+	.get_ringparam		= bnx2_get_ringparam,
+	.set_ringparam		= bnx2_set_ringparam,
+	.get_pauseparam		= bnx2_get_pauseparam,
+	.set_pauseparam		= bnx2_set_pauseparam,
+	.self_test		= bnx2_self_test,
+	.get_strings		= bnx2_get_strings,
+	.set_phys_id		= bnx2_set_phys_id,
+	.get_ethtool_stats	= bnx2_get_ethtool_stats,
+	.get_sset_count		= bnx2_get_sset_count,
+};
+
+/* Called with rtnl_lock */
+static int
+bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct bnx2 *bp = netdev_priv(dev);
+	int err;
+
+	switch(cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = bp->phy_addr;
+
+		/* fallthru */
+	case SIOCGMIIREG: {
+		u32 mii_regval;
+
+		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+			return -EOPNOTSUPP;
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&bp->phy_lock);
+		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
+		spin_unlock_bh(&bp->phy_lock);
+
+		data->val_out = mii_regval;
+
+		return err;
+	}
+
+	case SIOCSMIIREG:
+		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
+			return -EOPNOTSUPP;
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&bp->phy_lock);
+		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
+		spin_unlock_bh(&bp->phy_lock);
+
+		return err;
+
+	default:
+		/* do nothing */
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	if (netif_running(dev))
+		bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
+
+	return 0;
+}
+
+/* Called with rtnl_lock */
+static int
+bnx2_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+
+	if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
+		((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void
+poll_bnx2(struct net_device *dev)
+{
+	struct bnx2 *bp = netdev_priv(dev);
+	int i;
+
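+	/* Netpoll entry point: with interrupts unavailable, call each
+	 * vector's handler directly while its IRQ line is masked.
+	 */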
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, &bp->bnx2_napi[i]);
+		enable_irq(irq->vector);
+	}
+}
+#endif
+
+static void __devinit
+bnx2_get_5709_media(struct bnx2 *bp)
+{
+	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
+	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
+	u32 strap;
+
+	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
+		return;
+	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
+		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+		return;
+	}
+
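+	/* Dual-media part: the media type is encoded in the PHY strap,
+	 * read from the override field when strap override is in effect.
+	 */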
+	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
+		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
+	else
+		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
+
+	if (PCI_FUNC(bp->pdev->devfn) == 0) {
+		switch (strap) {
+		case 0x4:
+		case 0x5:
+		case 0x6:
+			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+			return;
+		}
+	} else {
+		switch (strap) {
+		case 0x1:
+		case 0x2:
+		case 0x4:
+			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+			return;
+		}
+	}
+}
+
+static void __devinit
+bnx2_get_pci_speed(struct bnx2 *bp)
+{
+	u32 reg;
+
+	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
+	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
+		u32 clkreg;
+
+		bp->flags |= BNX2_FLAG_PCIX;
+
+		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
+
+		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
+		switch (clkreg) {
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
+			bp->bus_speed_mhz = 133;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
+			bp->bus_speed_mhz = 100;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
+			bp->bus_speed_mhz = 66;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
+			bp->bus_speed_mhz = 50;
+			break;
+
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
+		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
+			bp->bus_speed_mhz = 33;
+			break;
+		}
+	}
+	else {
+		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
+			bp->bus_speed_mhz = 66;
+		else
+			bp->bus_speed_mhz = 33;
+	}
+
+	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
+		bp->flags |= BNX2_FLAG_PCI_32BIT;
+
+}
+
+static void __devinit
+bnx2_read_vpd_fw_ver(struct bnx2 *bp)
+{
+	int rc, i, j;
+	u8 *data;
+	unsigned int block_end, rosize, len;
+
+#define BNX2_VPD_NVRAM_OFFSET	0x300
+#define BNX2_VPD_LEN		128
+#define BNX2_MAX_VER_SLEN	30
+
+	data = kmalloc(256, GFP_KERNEL);
+	if (!data)
+		return;
+
+	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
+			     BNX2_VPD_LEN);
+	if (rc)
+		goto vpd_done;
+
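+	/* NVRAM reads return big-endian 32-bit words; byte swap each word
+	 * so the VPD block can be parsed as a plain byte stream.
+	 */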
+	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
+		data[i] = data[i + BNX2_VPD_LEN + 3];
+		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
+		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
+		data[i + 3] = data[i + BNX2_VPD_LEN];
+	}
+
+	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto vpd_done;
+
+	rosize = pci_vpd_lrdt_size(&data[i]);
+	i += PCI_VPD_LRDT_TAG_SIZE;
+	block_end = i + rosize;
+
+	if (block_end > BNX2_VPD_LEN)
+		goto vpd_done;
+
+	j = pci_vpd_find_info_keyword(data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (j < 0)
+		goto vpd_done;
+
+	len = pci_vpd_info_field_size(&data[j]);
+
+	j += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (j + len > block_end || len != 4 ||
+	    memcmp(&data[j], "1028", 4))
+		goto vpd_done;
+
+	j = pci_vpd_find_info_keyword(data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_VENDOR0);
+	if (j < 0)
+		goto vpd_done;
+
+	len = pci_vpd_info_field_size(&data[j]);
+
+	j += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
+		goto vpd_done;
+
+	memcpy(bp->fw_version, &data[j], len);
+	bp->fw_version[len] = ' ';
+
+vpd_done:
+	kfree(data);
+}
+
+static int __devinit
+bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
+{
+	struct bnx2 *bp;
+	unsigned long mem_len;
+	int rc, i, j;
+	u32 reg;
+	u64 dma_mask, persist_dma_mask;
+	int err;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	bp = netdev_priv(dev);
+
+	bp->flags = 0;
+	bp->phy_flags = 0;
+
+	bp->temp_stats_blk =
+		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+	if (bp->temp_stats_blk == NULL) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	/* enable device (incl. PCI PM wakeup), and bus-mastering */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		goto err_out;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+		goto err_out_disable;
+	}
+
+	pci_set_master(pdev);
+
+	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (bp->pm_cap == 0) {
+		dev_err(&pdev->dev,
+			"Cannot find power management capability, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+
+	spin_lock_init(&bp->phy_lock);
+	spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_lock);
+#endif
+	INIT_WORK(&bp->reset_task, bnx2_reset_task);
+
+	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
+	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
+	dev->mem_end = dev->mem_start + mem_len;
+	dev->irq = pdev->irq;
+
+	bp->regview = ioremap_nocache(dev->base_addr, mem_len);
+
+	if (!bp->regview) {
+		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
+		rc = -ENOMEM;
+		goto err_out_release;
+	}
+
+	bnx2_set_power_state(bp, PCI_D0);
+
+	/* Configure byte swap and enable write to the reg_window registers.
+	 * Rely on the CPU to do target byte swapping on big-endian systems;
+	 * the chip's target access swapping will not swap all accesses.
+	 */
+	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+		   BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		   BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
+
+	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+		if (!pci_is_pcie(pdev)) {
+			dev_err(&pdev->dev, "Not PCIE, aborting\n");
+			rc = -EIO;
+			goto err_out_unmap;
+		}
+		bp->flags |= BNX2_FLAG_PCIE;
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
+
+		/* AER (Advanced Error Reporting) hooks */
+		err = pci_enable_pcie_error_reporting(pdev);
+		if (!err)
+			bp->flags |= BNX2_FLAG_AER_ENABLED;
+
+	} else {
+		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
+		if (bp->pcix_cap == 0) {
+			dev_err(&pdev->dev,
+				"Cannot find PCIX capability, aborting\n");
+			rc = -EIO;
+			goto err_out_unmap;
+		}
+		bp->flags |= BNX2_FLAG_BROKEN_STATS;
+	}
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
+		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
+			bp->flags |= BNX2_FLAG_MSIX_CAP;
+	}
+
+	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
+		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
+			bp->flags |= BNX2_FLAG_MSI_CAP;
+	}
+
+	/* 5708 cannot support DMA addresses > 40-bit.  */
+	if (CHIP_NUM(bp) == CHIP_NUM_5708)
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
+	else
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+
+	/* Configure DMA attributes. */
+	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
+		dev->features |= NETIF_F_HIGHDMA;
+		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
+		if (rc) {
+			dev_err(&pdev->dev,
+				"pci_set_consistent_dma_mask failed, aborting\n");
+			goto err_out_unmap;
+		}
+	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
+		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
+		goto err_out_unmap;
+	}
+
+	if (!(bp->flags & BNX2_FLAG_PCIE))
+		bnx2_get_pci_speed(bp);
+
+	/* 5706A0 may falsely detect SERR and PERR. */
+	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+		reg = REG_RD(bp, PCI_COMMAND);
+		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+		REG_WR(bp, PCI_COMMAND, reg);
+	}
+	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
+		!(bp->flags & BNX2_FLAG_PCIX)) {
+
+		dev_err(&pdev->dev,
+			"5706 A1 can only be used in a PCIX bus, aborting\n");
+		rc = -EPERM;
+		goto err_out_unmap;
+	}
+
+	bnx2_init_nvram(bp);
+
+	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
+
+	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
+	    BNX2_SHM_HDR_SIGNATURE_SIG) {
+		u32 off = PCI_FUNC(pdev->devfn) << 2;
+
+		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
+	} else
+		bp->shmem_base = HOST_VIEW_SHMEM_BASE;
+
+	/* Get the permanent MAC address.  First we need to make sure the
+	 * firmware is actually running.
+	 */
+	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
+
+	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
+	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
+		dev_err(&pdev->dev, "Firmware not running, aborting\n");
+		rc = -ENODEV;
+		goto err_out_unmap;
+	}
+
+	bnx2_read_vpd_fw_ver(bp);
+
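+	/* Append the bootcode version as "bc x.y.z", formatting each byte
+	 * of BNX2_DEV_INFO_BC_REV in decimal without leading zeros.
+	 */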
+	j = strlen(bp->fw_version);
+	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
+	for (i = 0; i < 3 && j < 24; i++) {
+		u8 num, k, skip0;
+
+		if (i == 0) {
+			bp->fw_version[j++] = 'b';
+			bp->fw_version[j++] = 'c';
+			bp->fw_version[j++] = ' ';
+		}
+		num = (u8) (reg >> (24 - (i * 8)));
+		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
+			if (num >= k || !skip0 || k == 1) {
+				bp->fw_version[j++] = (num / k) + '0';
+				skip0 = 0;
+			}
+		}
+		if (i != 2)
+			bp->fw_version[j++] = '.';
+	}
+	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
+		bp->wol = 1;
+
+	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
+		bp->flags |= BNX2_FLAG_ASF_ENABLE;
+
+		for (i = 0; i < 30; i++) {
+			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
+				break;
+			msleep(10);
+		}
+	}
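+	/* If management firmware is running, append its version string
+	 * (three big-endian words located via BNX2_MFW_VER_PTR).
+	 */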
+	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+	reg &= BNX2_CONDITION_MFW_RUN_MASK;
+	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
+	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
+		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
+
+		if (j < 32)
+			bp->fw_version[j++] = ' ';
+		for (i = 0; i < 3 && j < 28; i++) {
+			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
+			reg = be32_to_cpu(reg);
+			memcpy(&bp->fw_version[j], &reg, 4);
+			j += 4;
+		}
+	}
+
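+	/* The permanent MAC address lives in shared memory as two words:
+	 * MAC_UPPER holds bytes 0-1 and MAC_LOWER holds bytes 2-5.
+	 */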
+	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
+	bp->mac_addr[0] = (u8) (reg >> 8);
+	bp->mac_addr[1] = (u8) reg;
+
+	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
+	bp->mac_addr[2] = (u8) (reg >> 24);
+	bp->mac_addr[3] = (u8) (reg >> 16);
+	bp->mac_addr[4] = (u8) (reg >> 8);
+	bp->mac_addr[5] = (u8) reg;
+
+	bp->tx_ring_size = MAX_TX_DESC_CNT;
+	bnx2_set_rx_ring_size(bp, 255);
+
+	bp->tx_quick_cons_trip_int = 2;
+	bp->tx_quick_cons_trip = 20;
+	bp->tx_ticks_int = 18;
+	bp->tx_ticks = 80;
+
+	bp->rx_quick_cons_trip_int = 2;
+	bp->rx_quick_cons_trip = 12;
+	bp->rx_ticks_int = 18;
+	bp->rx_ticks = 18;
+
+	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
+
+	bp->current_interval = BNX2_TIMER_INTERVAL;
+
+	bp->phy_addr = 1;
+
+	/* Disable WOL support if we are running on a SERDES chip. */
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		bnx2_get_5709_media(bp);
+	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
+		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
+
+	bp->phy_port = PORT_TP;
+	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
+		bp->phy_port = PORT_FIBRE;
+		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
+		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
+			bp->flags |= BNX2_FLAG_NO_WOL;
+			bp->wol = 0;
+		}
+		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
+			/* Don't do parallel detect on this board because of
+			 * some board problems.  The link will not go down
+			 * if we do parallel detect.
+			 */
+			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
+			    pdev->subsystem_device == 0x310c)
+				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
+		} else {
+			bp->phy_addr = 2;
+			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
+				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
+		}
+	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
+		   CHIP_NUM(bp) == CHIP_NUM_5708)
+		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
+	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
+		 (CHIP_REV(bp) == CHIP_REV_Ax ||
+		  CHIP_REV(bp) == CHIP_REV_Bx))
+		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
+
+	bnx2_init_fw_cap(bp);
+
+	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
+	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
+	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
+	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
+		bp->flags |= BNX2_FLAG_NO_WOL;
+		bp->wol = 0;
+	}
+
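+	/* 5706 A0 uses the same coalescing parameters in and out of
+	 * interrupt context.
+	 */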
+	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
+		bp->tx_quick_cons_trip_int =
+			bp->tx_quick_cons_trip;
+		bp->tx_ticks_int = bp->tx_ticks;
+		bp->rx_quick_cons_trip_int =
+			bp->rx_quick_cons_trip;
+		bp->rx_ticks_int = bp->rx_ticks;
+		bp->comp_prod_trip_int = bp->comp_prod_trip;
+		bp->com_ticks_int = bp->com_ticks;
+		bp->cmd_ticks_int = bp->cmd_ticks;
+	}
+
+	/* Disable MSI on 5706 if AMD 8132 bridge is found.
+	 *
+	 * MSI is defined to be a 32-bit write.  The 5706 does 64-bit MSI writes
+	 * with byte enables disabled on the unused 32-bit word.  This is legal
+	 * but causes problems on the AMD 8132 which will eventually stop
+	 * responding after a while.
+	 *
+	 * AMD believes this incompatibility is unique to the 5706, and
+	 * prefers to locally disable MSI rather than globally disabling it.
+	 */
+	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
+		struct pci_dev *amd_8132 = NULL;
+
+		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
+						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
+						  amd_8132))) {
+
+			if (amd_8132->revision >= 0x10 &&
+			    amd_8132->revision <= 0x13) {
+				disable_msi = 1;
+				pci_dev_put(amd_8132);
+				break;
+			}
+		}
+	}
+
+	bnx2_set_default_link(bp);
+	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
+
+	init_timer(&bp->timer);
+	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2_timer;
+
+#ifdef BCM_CNIC
+	if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
+		bp->cnic_eth_dev.max_iscsi_conn =
+			(bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
+			 BNX2_ISCSI_MAX_CONN_MASK) >> BNX2_ISCSI_MAX_CONN_SHIFT;
+#endif
+	pci_save_state(pdev);
+
+	return 0;
+
+err_out_unmap:
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
+		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
+
+	if (bp->regview) {
+		iounmap(bp->regview);
+		bp->regview = NULL;
+	}
+
+err_out_release:
+	pci_release_regions(pdev);
+
+err_out_disable:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+err_out:
+	return rc;
+}
+
+static char * __devinit
+bnx2_bus_string(struct bnx2 *bp, char *str)
+{
+	char *s = str;
+
+	if (bp->flags & BNX2_FLAG_PCIE) {
+		s += sprintf(s, "PCI Express");
+	} else {
+		s += sprintf(s, "PCI");
+		if (bp->flags & BNX2_FLAG_PCIX)
+			s += sprintf(s, "-X");
+		if (bp->flags & BNX2_FLAG_PCI_32BIT)
+			s += sprintf(s, " 32-bit");
+		else
+			s += sprintf(s, " 64-bit");
+		s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
+	}
+	return str;
+}
+
+static void
+bnx2_del_napi(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++)
+		netif_napi_del(&bp->bnx2_napi[i].napi);
+}
+
+static void
+bnx2_init_napi(struct bnx2 *bp)
+{
+	int i;
+
+	for (i = 0; i < bp->irq_nvecs; i++) {
+		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
+		int (*poll)(struct napi_struct *, int);
+
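+		/* Vector 0 also handles link and error events; the other
+		 * vectors only poll their own rings.
+		 */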
+		if (i == 0)
+			poll = bnx2_poll;
+		else
+			poll = bnx2_poll_msix;
+
+		netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+		bnapi->bp = bp;
+	}
+}
+
+static const struct net_device_ops bnx2_netdev_ops = {
+	.ndo_open		= bnx2_open,
+	.ndo_start_xmit		= bnx2_start_xmit,
+	.ndo_stop		= bnx2_close,
+	.ndo_get_stats64	= bnx2_get_stats64,
+	.ndo_set_rx_mode	= bnx2_set_rx_mode,
+	.ndo_do_ioctl		= bnx2_ioctl,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= bnx2_change_mac_addr,
+	.ndo_change_mtu		= bnx2_change_mtu,
+	.ndo_fix_features	= bnx2_fix_features,
+	.ndo_set_features	= bnx2_set_features,
+	.ndo_tx_timeout		= bnx2_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= poll_bnx2,
+#endif
+};
+
+static int __devinit
+bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static int version_printed = 0;
+	struct net_device *dev = NULL;
+	struct bnx2 *bp;
+	int rc;
+	char str[40];
+
+	if (version_printed++ == 0)
+		pr_info("%s", version);
+
+	/* dev zeroed in alloc_etherdev_mq() */
+	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
+
+	if (!dev)
+		return -ENOMEM;
+
+	rc = bnx2_init_board(pdev, dev);
+	if (rc < 0) {
+		free_netdev(dev);
+		return rc;
+	}
+
+	dev->netdev_ops = &bnx2_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+	dev->ethtool_ops = &bnx2_ethtool_ops;
+
+	bp = netdev_priv(dev);
+
+	pci_set_drvdata(pdev, dev);
+
+	rc = bnx2_request_firmware(bp);
+	if (rc)
+		goto error;
+
+	memcpy(dev->dev_addr, bp->mac_addr, 6);
+	memcpy(dev->perm_addr, bp->mac_addr, 6);
+
+	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+		NETIF_F_TSO | NETIF_F_TSO_ECN |
+		NETIF_F_RXHASH | NETIF_F_RXCSUM;
+
+	if (CHIP_NUM(bp) == CHIP_NUM_5709)
+		dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+
+	dev->vlan_features = dev->hw_features;
+	dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+	dev->features |= dev->hw_features;
+
+	if ((rc = register_netdev(dev))) {
+		dev_err(&pdev->dev, "Cannot register net device\n");
+		goto error;
+	}
+
+	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
+		    board_info[ent->driver_data].name,
+		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
+		    bnx2_bus_string(bp, str),
+		    dev->base_addr,
+		    bp->pdev->irq, dev->dev_addr);
+
+	return 0;
+
+error:
+	if (bp->mips_firmware)
+		release_firmware(bp->mips_firmware);
+	if (bp->rv2p_firmware)
+		release_firmware(bp->rv2p_firmware);
+
+	if (bp->regview)
+		iounmap(bp->regview);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+	return rc;
+}
+
+static void __devexit
+bnx2_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	unregister_netdev(dev);
+
+	del_timer_sync(&bp->timer);
+	cancel_work_sync(&bp->reset_task);
+
+	if (bp->mips_firmware)
+		release_firmware(bp->mips_firmware);
+	if (bp->rv2p_firmware)
+		release_firmware(bp->rv2p_firmware);
+
+	if (bp->regview)
+		iounmap(bp->regview);
+
+	kfree(bp->temp_stats_blk);
+
+	if (bp->flags & BNX2_FLAG_AER_ENABLED) {
+		pci_disable_pcie_error_reporting(pdev);
+		bp->flags &= ~BNX2_FLAG_AER_ENABLED;
+	}
+
+	free_netdev(dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int
+bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	/* PCI register 4 needs to be saved whether netif_running() or not.
+	 * MSI address and data need to be saved if using MSI and
+	 * netif_running().
+	 */
+	pci_save_state(pdev);
+	if (!netif_running(dev))
+		return 0;
+
+	cancel_work_sync(&bp->reset_task);
+	bnx2_netif_stop(bp, true);
+	netif_device_detach(dev);
+	del_timer_sync(&bp->timer);
+	bnx2_shutdown_chip(bp);
+	bnx2_free_skbs(bp);
+	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int
+bnx2_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	pci_restore_state(pdev);
+	if (!netif_running(dev))
+		return 0;
+
+	bnx2_set_power_state(bp, PCI_D0);
+	netif_device_attach(dev);
+	bnx2_init_nic(bp, 1);
+	bnx2_netif_start(bp, true);
+	return 0;
+}
+
+/**
+ * bnx2_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	rtnl_lock();
+	netif_device_detach(dev);
+
+	if (state == pci_channel_io_perm_failure) {
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (netif_running(dev)) {
+		bnx2_netif_stop(bp, true);
+		del_timer_sync(&bp->timer);
+		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
+	}
+
+	pci_disable_device(pdev);
+	rtnl_unlock();
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2_io_slot_reset - called after the PCI bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+	pci_ers_result_t result;
+	int err;
+
+	rtnl_lock();
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+
+		if (netif_running(dev)) {
+			bnx2_set_power_state(bp, PCI_D0);
+			bnx2_init_nic(bp, 1);
+		}
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+	rtnl_unlock();
+
+	if (!(bp->flags & BNX2_FLAG_AER_ENABLED))
+		return result;
+
+	err = pci_cleanup_aer_uncorrect_error_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
+			 err); /* non-fatal, continue */
+	}
+
+	return result;
+}
+
+/**
+ * bnx2_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2 *bp = netdev_priv(dev);
+
+	rtnl_lock();
+	if (netif_running(dev))
+		bnx2_netif_start(bp, true);
+
+	netif_device_attach(dev);
+	rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2_err_handler = {
+	.error_detected	= bnx2_io_error_detected,
+	.slot_reset	= bnx2_io_slot_reset,
+	.resume		= bnx2_io_resume,
+};
+
+static struct pci_driver bnx2_pci_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= bnx2_pci_tbl,
+	.probe		= bnx2_init_one,
+	.remove		= __devexit_p(bnx2_remove_one),
+	.suspend	= bnx2_suspend,
+	.resume		= bnx2_resume,
+	.err_handler	= &bnx2_err_handler,
+};
+
+static int __init bnx2_init(void)
+{
+	return pci_register_driver(&bnx2_pci_driver);
+}
+
+static void __exit bnx2_cleanup(void)
+{
+	pci_unregister_driver(&bnx2_pci_driver);
+}
+
+module_init(bnx2_init);
+module_exit(bnx2_cleanup);
+
+
+
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
new file mode 100644
index 0000000..fc50d42
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -0,0 +1,7388 @@
+/* bnx2.h: Broadcom NX2 network driver.
+ *
+ * Copyright (c) 2004-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Michael Chan  (mchan@broadcom.com)
+ */
+
+
+#ifndef BNX2_H
+#define BNX2_H
+
+/* Hardware data structures and register definitions automatically
+ * generated from RTL code. Do not modify.
+ */
+
+/*
+ *  tx_bd definition
+ */
+struct tx_bd {
+	u32 tx_bd_haddr_hi;
+	u32 tx_bd_haddr_lo;
+	u32 tx_bd_mss_nbytes;
+		#define TX_BD_TCP6_OFF2_SHL		(14)
+	u32 tx_bd_vlan_tag_flags;
+		#define TX_BD_FLAGS_CONN_FAULT		(1<<0)
+		#define TX_BD_FLAGS_TCP6_OFF0_MSK	(3<<1)
+		#define TX_BD_FLAGS_TCP6_OFF0_SHL	(1)
+		#define TX_BD_FLAGS_TCP_UDP_CKSUM	(1<<1)
+		#define TX_BD_FLAGS_IP_CKSUM		(1<<2)
+		#define TX_BD_FLAGS_VLAN_TAG		(1<<3)
+		#define TX_BD_FLAGS_COAL_NOW		(1<<4)
+		#define TX_BD_FLAGS_DONT_GEN_CRC	(1<<5)
+		#define TX_BD_FLAGS_END			(1<<6)
+		#define TX_BD_FLAGS_START		(1<<7)
+		#define TX_BD_FLAGS_SW_OPTION_WORD	(0x1f<<8)
+		#define TX_BD_FLAGS_TCP6_OFF4_SHL	(12)
+		#define TX_BD_FLAGS_SW_FLAGS		(1<<13)
+		#define TX_BD_FLAGS_SW_SNAP		(1<<14)
+		#define TX_BD_FLAGS_SW_LSO		(1<<15)
+
+};
+
+
+/*
+ *  rx_bd definition
+ */
+struct rx_bd {
+	u32 rx_bd_haddr_hi;
+	u32 rx_bd_haddr_lo;
+	u32 rx_bd_len;
+	u32 rx_bd_flags;
+		#define RX_BD_FLAGS_NOPUSH		(1<<0)
+		#define RX_BD_FLAGS_DUMMY		(1<<1)
+		#define RX_BD_FLAGS_END			(1<<2)
+		#define RX_BD_FLAGS_START		(1<<3)
+
+};
+
+#define BNX2_RX_ALIGN			16
+
+/*
+ *  status_block definition
+ */
+struct status_block {
+	u32 status_attn_bits;
+		#define STATUS_ATTN_BITS_LINK_STATE		(1L<<0)
+		#define STATUS_ATTN_BITS_TX_SCHEDULER_ABORT	(1L<<1)
+		#define STATUS_ATTN_BITS_TX_BD_READ_ABORT	(1L<<2)
+		#define STATUS_ATTN_BITS_TX_BD_CACHE_ABORT	(1L<<3)
+		#define STATUS_ATTN_BITS_TX_PROCESSOR_ABORT	(1L<<4)
+		#define STATUS_ATTN_BITS_TX_DMA_ABORT		(1L<<5)
+		#define STATUS_ATTN_BITS_TX_PATCHUP_ABORT	(1L<<6)
+		#define STATUS_ATTN_BITS_TX_ASSEMBLER_ABORT	(1L<<7)
+		#define STATUS_ATTN_BITS_RX_PARSER_MAC_ABORT	(1L<<8)
+		#define STATUS_ATTN_BITS_RX_PARSER_CATCHUP_ABORT	(1L<<9)
+		#define STATUS_ATTN_BITS_RX_MBUF_ABORT		(1L<<10)
+		#define STATUS_ATTN_BITS_RX_LOOKUP_ABORT	(1L<<11)
+		#define STATUS_ATTN_BITS_RX_PROCESSOR_ABORT	(1L<<12)
+		#define STATUS_ATTN_BITS_RX_V2P_ABORT		(1L<<13)
+		#define STATUS_ATTN_BITS_RX_BD_CACHE_ABORT	(1L<<14)
+		#define STATUS_ATTN_BITS_RX_DMA_ABORT		(1L<<15)
+		#define STATUS_ATTN_BITS_COMPLETION_ABORT	(1L<<16)
+		#define STATUS_ATTN_BITS_HOST_COALESCE_ABORT	(1L<<17)
+		#define STATUS_ATTN_BITS_MAILBOX_QUEUE_ABORT	(1L<<18)
+		#define STATUS_ATTN_BITS_CONTEXT_ABORT		(1L<<19)
+		#define STATUS_ATTN_BITS_CMD_SCHEDULER_ABORT	(1L<<20)
+		#define STATUS_ATTN_BITS_CMD_PROCESSOR_ABORT	(1L<<21)
+		#define STATUS_ATTN_BITS_MGMT_PROCESSOR_ABORT	(1L<<22)
+		#define STATUS_ATTN_BITS_MAC_ABORT		(1L<<23)
+		#define STATUS_ATTN_BITS_TIMER_ABORT		(1L<<24)
+		#define STATUS_ATTN_BITS_DMAE_ABORT		(1L<<25)
+		#define STATUS_ATTN_BITS_FLSH_ABORT		(1L<<26)
+		#define STATUS_ATTN_BITS_GRC_ABORT		(1L<<27)
+		#define STATUS_ATTN_BITS_EPB_ERROR		(1L<<30)
+		#define STATUS_ATTN_BITS_PARITY_ERROR		(1L<<31)
+
+	u32 status_attn_bits_ack;
+#if defined(__BIG_ENDIAN)
+	u16 status_tx_quick_consumer_index0;
+	u16 status_tx_quick_consumer_index1;
+	u16 status_tx_quick_consumer_index2;
+	u16 status_tx_quick_consumer_index3;
+	u16 status_rx_quick_consumer_index0;
+	u16 status_rx_quick_consumer_index1;
+	u16 status_rx_quick_consumer_index2;
+	u16 status_rx_quick_consumer_index3;
+	u16 status_rx_quick_consumer_index4;
+	u16 status_rx_quick_consumer_index5;
+	u16 status_rx_quick_consumer_index6;
+	u16 status_rx_quick_consumer_index7;
+	u16 status_rx_quick_consumer_index8;
+	u16 status_rx_quick_consumer_index9;
+	u16 status_rx_quick_consumer_index10;
+	u16 status_rx_quick_consumer_index11;
+	u16 status_rx_quick_consumer_index12;
+	u16 status_rx_quick_consumer_index13;
+	u16 status_rx_quick_consumer_index14;
+	u16 status_rx_quick_consumer_index15;
+	u16 status_completion_producer_index;
+	u16 status_cmd_consumer_index;
+	u16 status_idx;
+	u8 status_unused;
+	u8 status_blk_num;
+#elif defined(__LITTLE_ENDIAN)
+	u16 status_tx_quick_consumer_index1;
+	u16 status_tx_quick_consumer_index0;
+	u16 status_tx_quick_consumer_index3;
+	u16 status_tx_quick_consumer_index2;
+	u16 status_rx_quick_consumer_index1;
+	u16 status_rx_quick_consumer_index0;
+	u16 status_rx_quick_consumer_index3;
+	u16 status_rx_quick_consumer_index2;
+	u16 status_rx_quick_consumer_index5;
+	u16 status_rx_quick_consumer_index4;
+	u16 status_rx_quick_consumer_index7;
+	u16 status_rx_quick_consumer_index6;
+	u16 status_rx_quick_consumer_index9;
+	u16 status_rx_quick_consumer_index8;
+	u16 status_rx_quick_consumer_index11;
+	u16 status_rx_quick_consumer_index10;
+	u16 status_rx_quick_consumer_index13;
+	u16 status_rx_quick_consumer_index12;
+	u16 status_rx_quick_consumer_index15;
+	u16 status_rx_quick_consumer_index14;
+	u16 status_cmd_consumer_index;
+	u16 status_completion_producer_index;
+	u8 status_blk_num;
+	u8 status_unused;
+	u16 status_idx;
+#endif
+};
+
+/*
+ *  status_block_msix definition
+ */
+struct status_block_msix {
+#if defined(__BIG_ENDIAN)
+	u16 status_tx_quick_consumer_index;
+	u16 status_rx_quick_consumer_index;
+	u16 status_completion_producer_index;
+	u16 status_cmd_consumer_index;
+	u32 status_unused;
+	u16 status_idx;
+	u8 status_unused2;
+	u8 status_blk_num;
+#elif defined(__LITTLE_ENDIAN)
+	u16 status_rx_quick_consumer_index;
+	u16 status_tx_quick_consumer_index;
+	u16 status_cmd_consumer_index;
+	u16 status_completion_producer_index;
+	u32 status_unused;
+	u8 status_blk_num;
+	u8 status_unused2;
+	u16 status_idx;
+#endif
+};
+
+#define BNX2_SBLK_MSIX_ALIGN_SIZE	128
+
+
+/*
+ *  statistics_block definition
+ */
+struct statistics_block {
+	u32 stat_IfHCInOctets_hi;
+	u32 stat_IfHCInOctets_lo;
+	u32 stat_IfHCInBadOctets_hi;
+	u32 stat_IfHCInBadOctets_lo;
+	u32 stat_IfHCOutOctets_hi;
+	u32 stat_IfHCOutOctets_lo;
+	u32 stat_IfHCOutBadOctets_hi;
+	u32 stat_IfHCOutBadOctets_lo;
+	u32 stat_IfHCInUcastPkts_hi;
+	u32 stat_IfHCInUcastPkts_lo;
+	u32 stat_IfHCInMulticastPkts_hi;
+	u32 stat_IfHCInMulticastPkts_lo;
+	u32 stat_IfHCInBroadcastPkts_hi;
+	u32 stat_IfHCInBroadcastPkts_lo;
+	u32 stat_IfHCOutUcastPkts_hi;
+	u32 stat_IfHCOutUcastPkts_lo;
+	u32 stat_IfHCOutMulticastPkts_hi;
+	u32 stat_IfHCOutMulticastPkts_lo;
+	u32 stat_IfHCOutBroadcastPkts_hi;
+	u32 stat_IfHCOutBroadcastPkts_lo;
+	u32 stat_emac_tx_stat_dot3statsinternalmactransmiterrors;
+	u32 stat_Dot3StatsCarrierSenseErrors;
+	u32 stat_Dot3StatsFCSErrors;
+	u32 stat_Dot3StatsAlignmentErrors;
+	u32 stat_Dot3StatsSingleCollisionFrames;
+	u32 stat_Dot3StatsMultipleCollisionFrames;
+	u32 stat_Dot3StatsDeferredTransmissions;
+	u32 stat_Dot3StatsExcessiveCollisions;
+	u32 stat_Dot3StatsLateCollisions;
+	u32 stat_EtherStatsCollisions;
+	u32 stat_EtherStatsFragments;
+	u32 stat_EtherStatsJabbers;
+	u32 stat_EtherStatsUndersizePkts;
+	u32 stat_EtherStatsOverrsizePkts;
+	u32 stat_EtherStatsPktsRx64Octets;
+	u32 stat_EtherStatsPktsRx65Octetsto127Octets;
+	u32 stat_EtherStatsPktsRx128Octetsto255Octets;
+	u32 stat_EtherStatsPktsRx256Octetsto511Octets;
+	u32 stat_EtherStatsPktsRx512Octetsto1023Octets;
+	u32 stat_EtherStatsPktsRx1024Octetsto1522Octets;
+	u32 stat_EtherStatsPktsRx1523Octetsto9022Octets;
+	u32 stat_EtherStatsPktsTx64Octets;
+	u32 stat_EtherStatsPktsTx65Octetsto127Octets;
+	u32 stat_EtherStatsPktsTx128Octetsto255Octets;
+	u32 stat_EtherStatsPktsTx256Octetsto511Octets;
+	u32 stat_EtherStatsPktsTx512Octetsto1023Octets;
+	u32 stat_EtherStatsPktsTx1024Octetsto1522Octets;
+	u32 stat_EtherStatsPktsTx1523Octetsto9022Octets;
+	u32 stat_XonPauseFramesReceived;
+	u32 stat_XoffPauseFramesReceived;
+	u32 stat_OutXonSent;
+	u32 stat_OutXoffSent;
+	u32 stat_FlowControlDone;
+	u32 stat_MacControlFramesReceived;
+	u32 stat_XoffStateEntered;
+	u32 stat_IfInFramesL2FilterDiscards;
+	u32 stat_IfInRuleCheckerDiscards;
+	u32 stat_IfInFTQDiscards;
+	u32 stat_IfInMBUFDiscards;
+	u32 stat_IfInRuleCheckerP4Hit;
+	u32 stat_CatchupInRuleCheckerDiscards;
+	u32 stat_CatchupInFTQDiscards;
+	u32 stat_CatchupInMBUFDiscards;
+	u32 stat_CatchupInRuleCheckerP4Hit;
+	u32 stat_GenStat00;
+	u32 stat_GenStat01;
+	u32 stat_GenStat02;
+	u32 stat_GenStat03;
+	u32 stat_GenStat04;
+	u32 stat_GenStat05;
+	u32 stat_GenStat06;
+	u32 stat_GenStat07;
+	u32 stat_GenStat08;
+	u32 stat_GenStat09;
+	u32 stat_GenStat10;
+	u32 stat_GenStat11;
+	u32 stat_GenStat12;
+	u32 stat_GenStat13;
+	u32 stat_GenStat14;
+	u32 stat_GenStat15;
+	u32 stat_FwRxDrop;
+};
+
+
+/*
+ *  l2_fhdr definition
+ */
+struct l2_fhdr {
+	u32 l2_fhdr_status;
+		#define L2_FHDR_STATUS_RULE_CLASS	(0x7<<0)
+		#define L2_FHDR_STATUS_RULE_P2		(1<<3)
+		#define L2_FHDR_STATUS_RULE_P3		(1<<4)
+		#define L2_FHDR_STATUS_RULE_P4		(1<<5)
+		#define L2_FHDR_STATUS_L2_VLAN_TAG	(1<<6)
+		#define L2_FHDR_STATUS_L2_LLC_SNAP	(1<<7)
+		#define L2_FHDR_STATUS_RSS_HASH		(1<<8)
+		#define L2_FHDR_STATUS_IP_DATAGRAM	(1<<13)
+		#define L2_FHDR_STATUS_TCP_SEGMENT	(1<<14)
+		#define L2_FHDR_STATUS_UDP_DATAGRAM	(1<<15)
+
+		#define L2_FHDR_STATUS_SPLIT		(1<<16)
+		#define L2_FHDR_ERRORS_BAD_CRC		(1<<17)
+		#define L2_FHDR_ERRORS_PHY_DECODE	(1<<18)
+		#define L2_FHDR_ERRORS_ALIGNMENT	(1<<19)
+		#define L2_FHDR_ERRORS_TOO_SHORT	(1<<20)
+		#define L2_FHDR_ERRORS_GIANT_FRAME	(1<<21)
+		#define L2_FHDR_ERRORS_TCP_XSUM		(1<<28)
+		#define L2_FHDR_ERRORS_UDP_XSUM		(1<<31)
+
+		#define L2_FHDR_STATUS_USE_RXHASH	\
+			(L2_FHDR_STATUS_TCP_SEGMENT | L2_FHDR_STATUS_RSS_HASH)
+
+	u32 l2_fhdr_hash;
+#if defined(__BIG_ENDIAN)
+	u16 l2_fhdr_pkt_len;
+	u16 l2_fhdr_vlan_tag;
+	u16 l2_fhdr_ip_xsum;
+	u16 l2_fhdr_tcp_udp_xsum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 l2_fhdr_vlan_tag;
+	u16 l2_fhdr_pkt_len;
+	u16 l2_fhdr_tcp_udp_xsum;
+	u16 l2_fhdr_ip_xsum;
+#endif
+};
+
+#define BNX2_RX_OFFSET		(sizeof(struct l2_fhdr) + 2)
+
+/*
+ *  l2_context definition
+ */
+#define BNX2_L2CTX_TYPE					0x00000000
+#define BNX2_L2CTX_TYPE_SIZE_L2				 ((0xc0/0x20)<<16)
+#define BNX2_L2CTX_TYPE_TYPE				 (0xf<<28)
+#define BNX2_L2CTX_TYPE_TYPE_EMPTY			 (0<<28)
+#define BNX2_L2CTX_TYPE_TYPE_L2				 (1<<28)
+
+#define BNX2_L2CTX_TX_HOST_BIDX				0x00000088
+#define BNX2_L2CTX_EST_NBD				0x00000088
+#define BNX2_L2CTX_CMD_TYPE				0x00000088
+#define BNX2_L2CTX_CMD_TYPE_TYPE			 (0xf<<24)
+#define BNX2_L2CTX_CMD_TYPE_TYPE_L2			 (0<<24)
+#define BNX2_L2CTX_CMD_TYPE_TYPE_TCP			 (1<<24)
+
+#define BNX2_L2CTX_TX_HOST_BSEQ				0x00000090
+#define BNX2_L2CTX_TSCH_BSEQ				0x00000094
+#define BNX2_L2CTX_TBDR_BSEQ				0x00000098
+#define BNX2_L2CTX_TBDR_BOFF				0x0000009c
+#define BNX2_L2CTX_TBDR_BIDX				0x0000009c
+#define BNX2_L2CTX_TBDR_BHADDR_HI			0x000000a0
+#define BNX2_L2CTX_TBDR_BHADDR_LO			0x000000a4
+#define BNX2_L2CTX_TXP_BOFF				0x000000a8
+#define BNX2_L2CTX_TXP_BIDX				0x000000a8
+#define BNX2_L2CTX_TXP_BSEQ				0x000000ac
+
+#define BNX2_L2CTX_TYPE_XI				0x00000080
+#define BNX2_L2CTX_CMD_TYPE_XI				0x00000240
+#define BNX2_L2CTX_TBDR_BHADDR_HI_XI			0x00000258
+#define BNX2_L2CTX_TBDR_BHADDR_LO_XI			0x0000025c
+
+/*
+ *  l2_bd_chain_context definition
+ */
+#define BNX2_L2CTX_BD_PRE_READ				0x00000000
+#define BNX2_L2CTX_CTX_SIZE				0x00000000
+#define BNX2_L2CTX_CTX_TYPE				0x00000000
+#define BNX2_L2CTX_FLOW_CTRL_ENABLE			 0x000000ff
+#define BNX2_L2CTX_CTX_TYPE_SIZE_L2			 ((0x20/20)<<16)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE		 (0xf<<28)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_UNDEFINED	 (0<<28)
+#define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE	 (1<<28)
+
+#define BNX2_L2CTX_HOST_BDIDX				0x00000004
+#define BNX2_L2CTX_L5_STATUSB_NUM_SHIFT			 16
+#define BNX2_L2CTX_L2_STATUSB_NUM_SHIFT			 24
+#define BNX2_L2CTX_L5_STATUSB_NUM(sb_id)		\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L5_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_L2_STATUSB_NUM(sb_id)		\
+	(((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT) : 0)
+#define BNX2_L2CTX_HOST_BSEQ				0x00000008
+#define BNX2_L2CTX_NX_BSEQ				0x0000000c
+#define BNX2_L2CTX_NX_BDHADDR_HI			0x00000010
+#define BNX2_L2CTX_NX_BDHADDR_LO			0x00000014
+#define BNX2_L2CTX_NX_BDIDX				0x00000018
+
+#define BNX2_L2CTX_HOST_PG_BDIDX			0x00000044
+#define BNX2_L2CTX_PG_BUF_SIZE				0x00000048
+#define BNX2_L2CTX_RBDC_KEY				0x0000004c
+#define BNX2_L2CTX_RBDC_JUMBO_KEY			 0x3ffe
+#define BNX2_L2CTX_NX_PG_BDHADDR_HI			0x00000050
+#define BNX2_L2CTX_NX_PG_BDHADDR_LO			0x00000054
+
+/*
+ *  pci_config_l definition
+ *  offset: 0000
+ */
+#define BNX2_PCICFG_MSI_CONTROL				0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE			 (1L<<16)
+
+#define BNX2_PCICFG_MISC_CONFIG				0x00000068
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP	 (1L<<2)
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP	 (1L<<3)
+#define BNX2_PCICFG_MISC_CONFIG_RESERVED1		 (1L<<4)
+#define BNX2_PCICFG_MISC_CONFIG_CLOCK_CTL_ENA		 (1L<<5)
+#define BNX2_PCICFG_MISC_CONFIG_TARGET_GRC_WORD_SWAP	 (1L<<6)
+#define BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA		 (1L<<7)
+#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ		 (1L<<8)
+#define BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY		 (1L<<9)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN1_SWAP_EN	 (1L<<10)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN2_SWAP_EN	 (1L<<11)
+#define BNX2_PCICFG_MISC_CONFIG_GRC_WIN3_SWAP_EN	 (1L<<12)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_METAL_REV		 (0xffL<<16)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_BASE_REV		 (0xfL<<24)
+#define BNX2_PCICFG_MISC_CONFIG_ASIC_ID			 (0xfL<<28)
+
+#define BNX2_PCICFG_MISC_STATUS				0x0000006c
+#define BNX2_PCICFG_MISC_STATUS_INTA_VALUE		 (1L<<0)
+#define BNX2_PCICFG_MISC_STATUS_32BIT_DET		 (1L<<1)
+#define BNX2_PCICFG_MISC_STATUS_M66EN			 (1L<<2)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_DET		 (1L<<3)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED		 (0x3L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_66		 (0L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_100		 (1L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_133		 (2L<<4)
+#define BNX2_PCICFG_MISC_STATUS_PCIX_SPEED_PCI_MODE	 (3L<<4)
+#define BNX2_PCICFG_MISC_STATUS_BAD_MEM_WRITE_BE	 (1L<<8)
+
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS		0x00000070
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET	 (0xfL<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ	 (0L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ	 (1L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ	 (2L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ	 (3L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ	 (4L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ	 (5L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ	 (6L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ	 (7L<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW	 (0xfL<<0)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE	 (1L<<6)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT	 (1L<<7)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC	 (0x7L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF	 (0L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12	 (1L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6	 (2L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62	 (4L<<8)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_MIN_POWER	 (1L<<11)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED	 (0xfL<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100	 (0L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80	 (1L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50	 (2L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40	 (4L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25	 (8L<<12)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP	 (1L<<16)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_17	 (1L<<17)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_18	 (1L<<18)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED_19	 (1L<<19)
+#define BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_RESERVED	 (0xfffL<<20)
+
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS			0x00000078
+#define BNX2_PCICFG_REG_WINDOW_ADDRESS_VAL		 (0xfffffL<<2)
+
+#define BNX2_PCICFG_REG_WINDOW				0x00000080
+#define BNX2_PCICFG_INT_ACK_CMD				0x00000084
+#define BNX2_PCICFG_INT_ACK_CMD_INDEX			 (0xffffL<<0)
+#define BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID		 (1L<<16)
+#define BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM	 (1L<<17)
+#define BNX2_PCICFG_INT_ACK_CMD_MASK_INT		 (1L<<18)
+#define BNX2_PCICFG_INT_ACK_CMD_INTERRUPT_NUM		 (0xfL<<24)
+#define BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT		 24
+
+#define BNX2_PCICFG_STATUS_BIT_SET_CMD			0x00000088
+#define BNX2_PCICFG_STATUS_BIT_CLEAR_CMD		0x0000008c
+#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR			0x00000090
+#define BNX2_PCICFG_MAILBOX_QUEUE_DATA			0x00000094
+
+#define BNX2_PCICFG_DEVICE_CONTROL			0x000000b4
+#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND		 ((1L<<5)<<16)
+
+/*
+ *  pci_reg definition
+ *  offset: 0x400
+ */
+#define BNX2_PCI_GRC_WINDOW_ADDR			0x00000400
+#define BNX2_PCI_GRC_WINDOW_ADDR_VALUE			 (0x1ffL<<13)
+#define BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN		 (1L<<31)
+
+#define BNX2_PCI_GRC_WINDOW2_BASE		 	 0xc000
+#define BNX2_PCI_GRC_WINDOW3_BASE		 	 0xe000
+
+#define BNX2_PCI_CONFIG_1				0x00000404
+#define BNX2_PCI_CONFIG_1_RESERVED0			 (0xffL<<0)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY			 (0x7L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_OFF		 (0L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_16		 (1L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_32		 (2L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_64		 (3L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_128		 (4L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_256		 (5L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_512		 (6L<<8)
+#define BNX2_PCI_CONFIG_1_READ_BOUNDARY_1024		 (7L<<8)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY		 (0x7L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_OFF		 (0L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_16		 (1L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_32		 (2L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_64		 (3L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_128		 (4L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_256		 (5L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_512		 (6L<<11)
+#define BNX2_PCI_CONFIG_1_WRITE_BOUNDARY_1024		 (7L<<11)
+#define BNX2_PCI_CONFIG_1_RESERVED1			 (0x3ffffL<<14)
+
+#define BNX2_PCI_CONFIG_2				0x00000408
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE			 (0xfL<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_DISABLED		 (0L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64K			 (1L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128K		 (2L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256K		 (3L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512K		 (4L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1M			 (5L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_2M			 (6L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_4M			 (7L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_8M			 (8L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_16M			 (9L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_32M			 (10L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_64M			 (11L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_128M		 (12L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_256M		 (13L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_512M		 (14L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_SIZE_1G			 (15L<<0)
+#define BNX2_PCI_CONFIG_2_BAR1_64ENA			 (1L<<4)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_RETRY			 (1L<<5)
+#define BNX2_PCI_CONFIG_2_CFG_CYCLE_RETRY		 (1L<<6)
+#define BNX2_PCI_CONFIG_2_FIRST_CFG_DONE		 (1L<<7)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE			 (0xffL<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED		 (0L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1K		 (1L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2K		 (2L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4K		 (3L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8K		 (4L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16K		 (5L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_32K		 (6L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_64K		 (7L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_128K		 (8L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_256K		 (9L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_512K		 (10L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_1M		 (11L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_2M		 (12L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_4M		 (13L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_8M		 (14L<<8)
+#define BNX2_PCI_CONFIG_2_EXP_ROM_SIZE_16M		 (15L<<8)
+#define BNX2_PCI_CONFIG_2_MAX_SPLIT_LIMIT		 (0x1fL<<16)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT		 (0x3L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_512		 (0L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_1K		 (1L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_2K		 (2L<<21)
+#define BNX2_PCI_CONFIG_2_MAX_READ_LIMIT_4K		 (3L<<21)
+#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_MSTR		 (1L<<23)
+#define BNX2_PCI_CONFIG_2_FORCE_32_BIT_TGT		 (1L<<24)
+#define BNX2_PCI_CONFIG_2_KEEP_REQ_ASSERT		 (1L<<25)
+#define BNX2_PCI_CONFIG_2_RESERVED0			 (0x3fL<<26)
+#define BNX2_PCI_CONFIG_2_BAR_PREFETCH_XI		 (1L<<16)
+#define BNX2_PCI_CONFIG_2_RESERVED0_XI			 (0x7fffL<<17)
+
+#define BNX2_PCI_CONFIG_3				0x0000040c
+#define BNX2_PCI_CONFIG_3_STICKY_BYTE			 (0xffL<<0)
+#define BNX2_PCI_CONFIG_3_REG_STICKY_BYTE		 (0xffL<<8)
+#define BNX2_PCI_CONFIG_3_FORCE_PME			 (1L<<24)
+#define BNX2_PCI_CONFIG_3_PME_STATUS			 (1L<<25)
+#define BNX2_PCI_CONFIG_3_PME_ENABLE			 (1L<<26)
+#define BNX2_PCI_CONFIG_3_PM_STATE			 (0x3L<<27)
+#define BNX2_PCI_CONFIG_3_VAUX_PRESET			 (1L<<30)
+#define BNX2_PCI_CONFIG_3_PCI_POWER			 (1L<<31)
+
+#define BNX2_PCI_PM_DATA_A				0x00000410
+#define BNX2_PCI_PM_DATA_A_PM_DATA_0_PRG		 (0xffL<<0)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_1_PRG		 (0xffL<<8)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_2_PRG		 (0xffL<<16)
+#define BNX2_PCI_PM_DATA_A_PM_DATA_3_PRG		 (0xffL<<24)
+
+#define BNX2_PCI_PM_DATA_B				0x00000414
+#define BNX2_PCI_PM_DATA_B_PM_DATA_4_PRG		 (0xffL<<0)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_5_PRG		 (0xffL<<8)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_6_PRG		 (0xffL<<16)
+#define BNX2_PCI_PM_DATA_B_PM_DATA_7_PRG		 (0xffL<<24)
+
+#define BNX2_PCI_SWAP_DIAG0				0x00000418
+#define BNX2_PCI_SWAP_DIAG1				0x0000041c
+#define BNX2_PCI_EXP_ROM_ADDR				0x00000420
+#define BNX2_PCI_EXP_ROM_ADDR_ADDRESS			 (0x3fffffL<<2)
+#define BNX2_PCI_EXP_ROM_ADDR_REQ			 (1L<<31)
+
+#define BNX2_PCI_EXP_ROM_DATA				0x00000424
+#define BNX2_PCI_VPD_INTF				0x00000428
+#define BNX2_PCI_VPD_INTF_INTF_REQ			 (1L<<0)
+
+#define BNX2_PCI_VPD_ADDR_FLAG				0x0000042c
+#define BNX2_PCI_VPD_ADDR_FLAG_MSK			0x0000ffff
+#define BNX2_PCI_VPD_ADDR_FLAG_SL			0L
+#define BNX2_PCI_VPD_ADDR_FLAG_ADDRESS			 (0x1fffL<<2)
+#define BNX2_PCI_VPD_ADDR_FLAG_WR			 (1L<<15)
+
+#define BNX2_PCI_VPD_DATA				0x00000430
+#define BNX2_PCI_ID_VAL1				0x00000434
+#define BNX2_PCI_ID_VAL1_DEVICE_ID			 (0xffffL<<0)
+#define BNX2_PCI_ID_VAL1_VENDOR_ID			 (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL2				0x00000438
+#define BNX2_PCI_ID_VAL2_SUBSYSTEM_VENDOR_ID		 (0xffffL<<0)
+#define BNX2_PCI_ID_VAL2_SUBSYSTEM_ID			 (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL3				0x0000043c
+#define BNX2_PCI_ID_VAL3_CLASS_CODE			 (0xffffffL<<0)
+#define BNX2_PCI_ID_VAL3_REVISION_ID			 (0xffL<<24)
+
+#define BNX2_PCI_ID_VAL4				0x00000440
+#define BNX2_PCI_ID_VAL4_CAP_ENA			 (0xfL<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_0			 (0L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_1			 (1L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_2			 (2L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_3			 (3L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_4			 (4L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_5			 (5L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_6			 (6L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_7			 (7L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_8			 (8L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_9			 (9L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_10			 (10L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_11			 (11L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_12			 (12L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_13			 (13L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_14			 (14L<<0)
+#define BNX2_PCI_ID_VAL4_CAP_ENA_15			 (15L<<0)
+#define BNX2_PCI_ID_VAL4_RESERVED0			 (0x3L<<4)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG			 (0x3L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_0			 (0L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_1			 (1L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_2			 (2L<<6)
+#define BNX2_PCI_ID_VAL4_PM_SCALE_PRG_3			 (3L<<6)
+#define BNX2_PCI_ID_VAL4_MSI_PV_MASK_CAP		 (1L<<8)
+#define BNX2_PCI_ID_VAL4_MSI_LIMIT			 (0x7L<<9)
+#define BNX2_PCI_ID_VAL4_MULTI_MSG_CAP			 (0x7L<<12)
+#define BNX2_PCI_ID_VAL4_MSI_ENABLE			 (1L<<15)
+#define BNX2_PCI_ID_VAL4_MAX_64_ADVERTIZE		 (1L<<16)
+#define BNX2_PCI_ID_VAL4_MAX_133_ADVERTIZE		 (1L<<17)
+#define BNX2_PCI_ID_VAL4_RESERVED2			 (0x7L<<18)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B21	 (0x3L<<21)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B21		 (0x3L<<23)
+#define BNX2_PCI_ID_VAL4_MAX_CUMULATIVE_SIZE_B0		 (1L<<25)
+#define BNX2_PCI_ID_VAL4_MAX_MEM_READ_SIZE_B10		 (0x3L<<26)
+#define BNX2_PCI_ID_VAL4_MAX_SPLIT_SIZE_B0		 (1L<<28)
+#define BNX2_PCI_ID_VAL4_RESERVED3			 (0x7L<<29)
+#define BNX2_PCI_ID_VAL4_RESERVED3_XI			 (0xffffL<<16)
+
+#define BNX2_PCI_ID_VAL5				0x00000444
+#define BNX2_PCI_ID_VAL5_D1_SUPPORT			 (1L<<0)
+#define BNX2_PCI_ID_VAL5_D2_SUPPORT			 (1L<<1)
+#define BNX2_PCI_ID_VAL5_PME_IN_D0			 (1L<<2)
+#define BNX2_PCI_ID_VAL5_PME_IN_D1			 (1L<<3)
+#define BNX2_PCI_ID_VAL5_PME_IN_D2			 (1L<<4)
+#define BNX2_PCI_ID_VAL5_PME_IN_D3_HOT			 (1L<<5)
+#define BNX2_PCI_ID_VAL5_RESERVED0_TE			 (0x3ffffffL<<6)
+#define BNX2_PCI_ID_VAL5_PM_VERSION_XI			 (0x7L<<6)
+#define BNX2_PCI_ID_VAL5_NO_SOFT_RESET_XI		 (1L<<9)
+#define BNX2_PCI_ID_VAL5_RESERVED0_XI			 (0x3fffffL<<10)
+
+#define BNX2_PCI_PCIX_EXTENDED_STATUS			0x00000448
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_NO_SNOOP		 (1L<<8)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_LONG_BURST	 (1L<<9)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_CLASS	 (0xfL<<16)
+#define BNX2_PCI_PCIX_EXTENDED_STATUS_SPLIT_COMP_MSG_IDX	 (0xffL<<24)
+
+#define BNX2_PCI_ID_VAL6				0x0000044c
+#define BNX2_PCI_ID_VAL6_MAX_LAT			 (0xffL<<0)
+#define BNX2_PCI_ID_VAL6_MIN_GNT			 (0xffL<<8)
+#define BNX2_PCI_ID_VAL6_BIST				 (0xffL<<16)
+#define BNX2_PCI_ID_VAL6_RESERVED0			 (0xffL<<24)
+
+#define BNX2_PCI_MSI_DATA				0x00000450
+#define BNX2_PCI_MSI_DATA_MSI_DATA			 (0xffffL<<0)
+
+#define BNX2_PCI_MSI_ADDR_H				0x00000454
+#define BNX2_PCI_MSI_ADDR_L				0x00000458
+#define BNX2_PCI_MSI_ADDR_L_VAL				 (0x3fffffffL<<2)
+
+#define BNX2_PCI_CFG_ACCESS_CMD				0x0000045c
+#define BNX2_PCI_CFG_ACCESS_CMD_ADR			 (0x3fL<<2)
+#define BNX2_PCI_CFG_ACCESS_CMD_RD_REQ			 (1L<<27)
+#define BNX2_PCI_CFG_ACCESS_CMD_WR_REQ			 (0xfL<<28)
+
+#define BNX2_PCI_CFG_ACCESS_DATA			0x00000460
+#define BNX2_PCI_MSI_MASK				0x00000464
+#define BNX2_PCI_MSI_MASK_MSI_MASK			 (0xffffffffL<<0)
+
+#define BNX2_PCI_MSI_PEND				0x00000468
+#define BNX2_PCI_MSI_PEND_MSI_PEND			 (0xffffffffL<<0)
+
+#define BNX2_PCI_PM_DATA_C				0x0000046c
+#define BNX2_PCI_PM_DATA_C_PM_DATA_8_PRG		 (0xffL<<0)
+#define BNX2_PCI_PM_DATA_C_RESERVED0			 (0xffffffL<<8)
+
+#define BNX2_PCI_MSIX_CONTROL				0x000004c0
+#define BNX2_PCI_MSIX_CONTROL_MSIX_TBL_SIZ		 (0x7ffL<<0)
+#define BNX2_PCI_MSIX_CONTROL_RESERVED0			 (0x1fffffL<<11)
+
+#define BNX2_PCI_MSIX_TBL_OFF_BIR			0x000004c4
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_BIR		 (0x7L<<0)
+#define BNX2_PCI_MSIX_TBL_OFF_BIR_MSIX_TBL_OFF		 (0x1fffffffL<<3)
+
+#define BNX2_PCI_MSIX_PBA_OFF_BIT			0x000004c8
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_BIR		 (0x7L<<0)
+#define BNX2_PCI_MSIX_PBA_OFF_BIT_MSIX_PBA_OFF		 (0x1fffffffL<<3)
+
+#define BNX2_PCI_PCIE_CAPABILITY			0x000004d0
+#define BNX2_PCI_PCIE_CAPABILITY_INTERRUPT_MSG_NUM	 (0x1fL<<0)
+#define BNX2_PCI_PCIE_CAPABILITY_COMPLY_PCIE_1_1	 (1L<<5)
+
+#define BNX2_PCI_DEVICE_CAPABILITY			0x000004d4
+#define BNX2_PCI_DEVICE_CAPABILITY_MAX_PL_SIZ_SUPPORTED	 (0x7L<<0)
+#define BNX2_PCI_DEVICE_CAPABILITY_EXTENDED_TAG_SUPPORT	 (1L<<5)
+#define BNX2_PCI_DEVICE_CAPABILITY_L0S_ACCEPTABLE_LATENCY	 (0x7L<<6)
+#define BNX2_PCI_DEVICE_CAPABILITY_L1_ACCEPTABLE_LATENCY	 (0x7L<<9)
+#define BNX2_PCI_DEVICE_CAPABILITY_ROLE_BASED_ERR_RPT	 (1L<<15)
+
+#define BNX2_PCI_LINK_CAPABILITY			0x000004dc
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED		 (0xfL<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0001	 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_SPEED_0010	 (1L<<0)
+#define BNX2_PCI_LINK_CAPABILITY_MAX_LINK_WIDTH		 (0x1fL<<4)
+#define BNX2_PCI_LINK_CAPABILITY_CLK_POWER_MGMT		 (1L<<9)
+#define BNX2_PCI_LINK_CAPABILITY_ASPM_SUPPORT		 (0x3L<<10)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT		 (0x7L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_101	 (5L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_LAT_110	 (6L<<12)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT		 (0x7L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_001	 (1L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_LAT_010	 (2L<<15)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT	 (0x7L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_101	 (5L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L0S_EXIT_COMM_LAT_110	 (6L<<18)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT	 (0x7L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_001	 (1L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_L1_EXIT_COMM_LAT_010	 (2L<<21)
+#define BNX2_PCI_LINK_CAPABILITY_PORT_NUM		 (0xffL<<24)
+
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2		0x000004e4
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_RANGE_SUPP	 (0xfL<<0)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_CMPL_TO_DISABL_SUPP	 (1L<<4)
+#define BNX2_PCI_PCIE_DEVICE_CAPABILITY_2_RESERVED	 (0x7ffffffL<<5)
+
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2			0x000004e8
+#define BNX2_PCI_PCIE_LINK_CAPABILITY_2_RESERVED	 (0xffffffffL<<0)
+
+#define BNX2_PCI_GRC_WINDOW1_ADDR			0x00000610
+#define BNX2_PCI_GRC_WINDOW1_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW2_ADDR			0x00000614
+#define BNX2_PCI_GRC_WINDOW2_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_PCI_GRC_WINDOW3_ADDR			0x00000618
+#define BNX2_PCI_GRC_WINDOW3_ADDR_VALUE			 (0x1ffL<<13)
+
+#define BNX2_MSIX_TABLE_ADDR				 0x318000
+#define BNX2_MSIX_PBA_ADDR				 0x31c000
+
+/*
+ *  misc_reg definition
+ *  offset: 0x800
+ */
+#define BNX2_MISC_COMMAND				0x00000800
+#define BNX2_MISC_COMMAND_ENABLE_ALL			 (1L<<0)
+#define BNX2_MISC_COMMAND_DISABLE_ALL			 (1L<<1)
+#define BNX2_MISC_COMMAND_SW_RESET			 (1L<<4)
+#define BNX2_MISC_COMMAND_POR_RESET			 (1L<<5)
+#define BNX2_MISC_COMMAND_HD_RESET			 (1L<<6)
+#define BNX2_MISC_COMMAND_CMN_SW_RESET			 (1L<<7)
+#define BNX2_MISC_COMMAND_PAR_ERROR			 (1L<<8)
+#define BNX2_MISC_COMMAND_CS16_ERR			 (1L<<9)
+#define BNX2_MISC_COMMAND_CS16_ERR_LOC			 (0xfL<<12)
+#define BNX2_MISC_COMMAND_PAR_ERR_RAM			 (0x7fL<<16)
+#define BNX2_MISC_COMMAND_POWERDOWN_EVENT		 (1L<<23)
+#define BNX2_MISC_COMMAND_SW_SHUTDOWN			 (1L<<24)
+#define BNX2_MISC_COMMAND_SHUTDOWN_EN			 (1L<<25)
+#define BNX2_MISC_COMMAND_DINTEG_ATTN_EN		 (1L<<26)
+#define BNX2_MISC_COMMAND_PCIE_LINK_IN_L23		 (1L<<27)
+#define BNX2_MISC_COMMAND_PCIE_DIS			 (1L<<28)
+
+#define BNX2_MISC_CFG					0x00000804
+#define BNX2_MISC_CFG_GRC_TMOUT				 (1L<<0)
+#define BNX2_MISC_CFG_NVM_WR_EN				 (0x3L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_PROTECT			 (0L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_PCI			 (1L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW			 (2L<<1)
+#define BNX2_MISC_CFG_NVM_WR_EN_ALLOW2			 (3L<<1)
+#define BNX2_MISC_CFG_BIST_EN				 (1L<<3)
+#define BNX2_MISC_CFG_CK25_OUT_ALT_SRC			 (1L<<4)
+#define BNX2_MISC_CFG_RESERVED5_TE			 (1L<<5)
+#define BNX2_MISC_CFG_RESERVED6_TE			 (1L<<6)
+#define BNX2_MISC_CFG_CLK_CTL_OVERRIDE			 (1L<<7)
+#define BNX2_MISC_CFG_LEDMODE				 (0x7L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC			 (0L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_TE			 (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_TE			 (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_TE			 (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_TE			 (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_TE			 (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_TE			 (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_TE			 (7L<<8)
+#define BNX2_MISC_CFG_MCP_GRC_TMOUT_TE			 (1L<<11)
+#define BNX2_MISC_CFG_DBU_GRC_TMOUT_TE			 (1L<<12)
+#define BNX2_MISC_CFG_LEDMODE_XI			 (0xfL<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC_XI			 (0L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY1_XI			 (1L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY2_XI			 (2L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY3_XI			 (3L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC2_XI			 (4L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY4_XI			 (5L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY5_XI			 (6L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY6_XI			 (7L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC3_XI			 (8L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY7_XI			 (9L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY8_XI			 (10L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY9_XI			 (11L<<8)
+#define BNX2_MISC_CFG_LEDMODE_MAC4_XI			 (12L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY10_XI			 (13L<<8)
+#define BNX2_MISC_CFG_LEDMODE_PHY11_XI			 (14L<<8)
+#define BNX2_MISC_CFG_LEDMODE_UNUSED_XI			 (15L<<8)
+#define BNX2_MISC_CFG_PORT_SELECT_XI			 (1L<<13)
+#define BNX2_MISC_CFG_PARITY_MODE_XI			 (1L<<14)
+
+#define BNX2_MISC_ID					0x00000808
+#define BNX2_MISC_ID_BOND_ID				 (0xfL<<0)
+#define BNX2_MISC_ID_BOND_ID_X				 (0L<<0)
+#define BNX2_MISC_ID_BOND_ID_C				 (3L<<0)
+#define BNX2_MISC_ID_BOND_ID_S				 (12L<<0)
+#define BNX2_MISC_ID_CHIP_METAL				 (0xffL<<4)
+#define BNX2_MISC_ID_CHIP_REV				 (0xfL<<12)
+#define BNX2_MISC_ID_CHIP_NUM				 (0xffffL<<16)
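
[Editor's note, not part of the patch: each field above is a mask in place, so shifting it down by the field's LSB yields the raw value. A sketch of decoding the chip identity at probe time, assuming the driver's REG_RD() helper and struct bnx2 context:]

	/* Sketch: pull the bond ID, metal, revision and chip number out of
	 * MISC_ID.  The masks come from the definitions above.
	 */
	u32 misc_id  = REG_RD(bp, BNX2_MISC_ID);
	u32 bond_id  = misc_id & BNX2_MISC_ID_BOND_ID;			/* bits 3:0 */
	u32 metal    = (misc_id & BNX2_MISC_ID_CHIP_METAL) >> 4;	/* bits 11:4 */
	u32 rev      = (misc_id & BNX2_MISC_ID_CHIP_REV) >> 12;	/* bits 15:12 */
	u32 chip_num = (misc_id & BNX2_MISC_ID_CHIP_NUM) >> 16;	/* bits 31:16 */
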
+
+#define BNX2_MISC_ENABLE_STATUS_BITS			0x0000080c
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_DMA_ENABLE	 (1L<<4)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_ENABLE_STATUS_BITS_EMAC_ENABLE	 (1L<<9)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_MBUF_ENABLE	 (1L<<12)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE	 (1L<<15)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RX_DMA_ENABLE	 (1L<<17)
+#define BNX2_MISC_ENABLE_STATUS_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_ENABLE_STATUS_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_ENABLE_STATUS_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE	 (1L<<21)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_ENABLE_STATUS_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_ENABLE_STATUS_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_ENABLE_STATUS_BITS_TIMER_ENABLE	 (1L<<25)
+#define BNX2_MISC_ENABLE_STATUS_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_ENABLE_STATUS_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_STATUS_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
+
+#define BNX2_MISC_ENABLE_SET_BITS			0x00000810
+#define BNX2_MISC_ENABLE_SET_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_DMA_ENABLE		 (1L<<4)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_ENABLE_SET_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE		 (1L<<9)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE	 (1L<<12)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_V2P_ENABLE		 (1L<<15)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_ENABLE_SET_BITS_RX_DMA_ENABLE		 (1L<<17)
+#define BNX2_MISC_ENABLE_SET_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_ENABLE_SET_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_ENABLE_SET_BITS_CONTEXT_ENABLE	 (1L<<21)
+#define BNX2_MISC_ENABLE_SET_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_ENABLE_SET_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_ENABLE_SET_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_ENABLE_SET_BITS_TIMER_ENABLE		 (1L<<25)
+#define BNX2_MISC_ENABLE_SET_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_ENABLE_SET_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_SET_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_SET_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
+
+#define BNX2_MISC_ENABLE_CLR_BITS			0x00000814
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE		 (1L<<4)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_ENABLE_CLR_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_ENABLE_CLR_BITS_EMAC_ENABLE		 (1L<<9)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_MBUF_ENABLE	 (1L<<12)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_V2P_ENABLE		 (1L<<15)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE		 (1L<<17)
+#define BNX2_MISC_ENABLE_CLR_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_ENABLE_CLR_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_ENABLE_CLR_BITS_CONTEXT_ENABLE	 (1L<<21)
+#define BNX2_MISC_ENABLE_CLR_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_ENABLE_CLR_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_ENABLE_CLR_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_ENABLE_CLR_BITS_TIMER_ENABLE		 (1L<<25)
+#define BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_ENABLE_CLR_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_ENABLE_CLR_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_ENABLE_CLR_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
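
[Editor's note, not part of the patch: the STATUS/SET/CLR trio above is the usual write-1 idiom — bits written to SET enable the corresponding blocks, bits written to CLR disable them, and STATUS reads back the current state, so the driver never needs a read-modify-write. A hedged sketch of quiescing DMA before a chip reset, in the style of bnx2.c (REG_RD()/REG_WR() and udelay() assumed):]

	/* Sketch: stop the DMA-capable blocks and let in-flight PCI traffic
	 * drain before issuing a reset.  The read-back flushes the posted
	 * write before the delay.
	 */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);
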
+
+#define BNX2_MISC_CLOCK_CONTROL_BITS			0x00000818
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET	 (0xfL<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ	 (0L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ	 (1L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ	 (2L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ	 (3L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ	 (4L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ	 (5L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ	 (6L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ	 (7L<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW	 (0xfL<<0)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_DISABLE	 (1L<<6)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT	 (1L<<7)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC	 (0x7L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_UNDEF	 (0L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_12	 (1L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_6	 (2L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_SRC_62	 (4L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED0_XI	 (0x7L<<8)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_MIN_POWER		 (1L<<11)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED	 (0xfL<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_100	 (0L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_80	 (1L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_50	 (2L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_40	 (4L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_25	 (8L<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED1_XI	 (0xfL<<12)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_STOP	 (1L<<16)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_17_TE	 (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_18_TE	 (1L<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_19_TE	 (1L<<19)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED_TE	 (0xfffL<<20)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_ALT_MGMT_XI	 (1L<<17)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED2_XI	 (0x3fL<<18)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_VCO_XI	 (0x7L<<24)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_RESERVED3_XI	 (1L<<27)
+#define BNX2_MISC_CLOCK_CONTROL_BITS_CORE_CLK_PLL_SPEED_XI	 (0xfL<<28)
+
+#define BNX2_MISC_SPIO					0x0000081c
+#define BNX2_MISC_SPIO_VALUE				 (0xffL<<0)
+#define BNX2_MISC_SPIO_SET				 (0xffL<<8)
+#define BNX2_MISC_SPIO_CLR				 (0xffL<<16)
+#define BNX2_MISC_SPIO_FLOAT				 (0xffL<<24)
+
+#define BNX2_MISC_SPIO_INT				0x00000820
+#define BNX2_MISC_SPIO_INT_INT_STATE_TE			 (0xfL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_TE			 (0xfL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_TE			 (0xfL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_TE			 (0xfL<<24)
+#define BNX2_MISC_SPIO_INT_INT_STATE_XI			 (0xffL<<0)
+#define BNX2_MISC_SPIO_INT_OLD_VALUE_XI			 (0xffL<<8)
+#define BNX2_MISC_SPIO_INT_OLD_SET_XI			 (0xffL<<16)
+#define BNX2_MISC_SPIO_INT_OLD_CLR_XI			 (0xffL<<24)
+
+#define BNX2_MISC_CONFIG_LFSR				0x00000824
+#define BNX2_MISC_CONFIG_LFSR_DIV			 (0xffffL<<0)
+
+#define BNX2_MISC_LFSR_MASK_BITS			0x00000828
+#define BNX2_MISC_LFSR_MASK_BITS_TX_SCHEDULER_ENABLE	 (1L<<0)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_READ_ENABLE	 (1L<<1)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_BD_CACHE_ENABLE	 (1L<<2)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PROCESSOR_ENABLE	 (1L<<3)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_DMA_ENABLE		 (1L<<4)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PATCHUP_ENABLE	 (1L<<5)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_PAYLOAD_Q_ENABLE	 (1L<<6)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_HEADER_Q_ENABLE	 (1L<<7)
+#define BNX2_MISC_LFSR_MASK_BITS_TX_ASSEMBLER_ENABLE	 (1L<<8)
+#define BNX2_MISC_LFSR_MASK_BITS_EMAC_ENABLE		 (1L<<9)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_MAC_ENABLE	 (1L<<10)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PARSER_CATCHUP_ENABLE	 (1L<<11)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_MBUF_ENABLE		 (1L<<12)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_LOOKUP_ENABLE	 (1L<<13)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_PROCESSOR_ENABLE	 (1L<<14)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_V2P_ENABLE		 (1L<<15)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_BD_CACHE_ENABLE	 (1L<<16)
+#define BNX2_MISC_LFSR_MASK_BITS_RX_DMA_ENABLE		 (1L<<17)
+#define BNX2_MISC_LFSR_MASK_BITS_COMPLETION_ENABLE	 (1L<<18)
+#define BNX2_MISC_LFSR_MASK_BITS_HOST_COALESCE_ENABLE	 (1L<<19)
+#define BNX2_MISC_LFSR_MASK_BITS_MAILBOX_QUEUE_ENABLE	 (1L<<20)
+#define BNX2_MISC_LFSR_MASK_BITS_CONTEXT_ENABLE		 (1L<<21)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_SCHEDULER_ENABLE	 (1L<<22)
+#define BNX2_MISC_LFSR_MASK_BITS_CMD_PROCESSOR_ENABLE	 (1L<<23)
+#define BNX2_MISC_LFSR_MASK_BITS_MGMT_PROCESSOR_ENABLE	 (1L<<24)
+#define BNX2_MISC_LFSR_MASK_BITS_TIMER_ENABLE		 (1L<<25)
+#define BNX2_MISC_LFSR_MASK_BITS_DMA_ENGINE_ENABLE	 (1L<<26)
+#define BNX2_MISC_LFSR_MASK_BITS_UMP_ENABLE		 (1L<<27)
+#define BNX2_MISC_LFSR_MASK_BITS_RV2P_CMD_SCHEDULER_ENABLE	 (1L<<28)
+#define BNX2_MISC_LFSR_MASK_BITS_RSVD_FUTURE_ENABLE	 (0x7L<<29)
+
+#define BNX2_MISC_ARB_REQ0				0x0000082c
+#define BNX2_MISC_ARB_REQ1				0x00000830
+#define BNX2_MISC_ARB_REQ2				0x00000834
+#define BNX2_MISC_ARB_REQ3				0x00000838
+#define BNX2_MISC_ARB_REQ4				0x0000083c
+#define BNX2_MISC_ARB_FREE0				0x00000840
+#define BNX2_MISC_ARB_FREE1				0x00000844
+#define BNX2_MISC_ARB_FREE2				0x00000848
+#define BNX2_MISC_ARB_FREE3				0x0000084c
+#define BNX2_MISC_ARB_FREE4				0x00000850
+#define BNX2_MISC_ARB_REQ_STATUS0			0x00000854
+#define BNX2_MISC_ARB_REQ_STATUS1			0x00000858
+#define BNX2_MISC_ARB_REQ_STATUS2			0x0000085c
+#define BNX2_MISC_ARB_REQ_STATUS3			0x00000860
+#define BNX2_MISC_ARB_REQ_STATUS4			0x00000864
+#define BNX2_MISC_ARB_GNT0				0x00000868
+#define BNX2_MISC_ARB_GNT0_0				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT0_1				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT0_2				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT0_3				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT0_4				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT0_5				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT0_6				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT0_7				 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT1				0x0000086c
+#define BNX2_MISC_ARB_GNT1_8				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT1_9				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT1_10				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT1_11				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT1_12				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT1_13				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT1_14				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT1_15				 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT2				0x00000870
+#define BNX2_MISC_ARB_GNT2_16				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT2_17				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT2_18				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT2_19				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT2_20				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT2_21				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT2_22				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT2_23				 (0x7L<<28)
+
+#define BNX2_MISC_ARB_GNT3				0x00000874
+#define BNX2_MISC_ARB_GNT3_24				 (0x7L<<0)
+#define BNX2_MISC_ARB_GNT3_25				 (0x7L<<4)
+#define BNX2_MISC_ARB_GNT3_26				 (0x7L<<8)
+#define BNX2_MISC_ARB_GNT3_27				 (0x7L<<12)
+#define BNX2_MISC_ARB_GNT3_28				 (0x7L<<16)
+#define BNX2_MISC_ARB_GNT3_29				 (0x7L<<20)
+#define BNX2_MISC_ARB_GNT3_30				 (0x7L<<24)
+#define BNX2_MISC_ARB_GNT3_31				 (0x7L<<28)
+
+#define BNX2_MISC_RESERVED1				0x00000878
+#define BNX2_MISC_RESERVED1_MISC_RESERVED1_VALUE	 (0x3fL<<0)
+
+#define BNX2_MISC_RESERVED2				0x0000087c
+#define BNX2_MISC_RESERVED2_PCIE_DIS			 (1L<<0)
+#define BNX2_MISC_RESERVED2_LINK_IN_L23			 (1L<<1)
+
+#define BNX2_MISC_SM_ASF_CONTROL			0x00000880
+#define BNX2_MISC_SM_ASF_CONTROL_ASF_RST		 (1L<<0)
+#define BNX2_MISC_SM_ASF_CONTROL_TSC_EN			 (1L<<1)
+#define BNX2_MISC_SM_ASF_CONTROL_WG_TO			 (1L<<2)
+#define BNX2_MISC_SM_ASF_CONTROL_HB_TO			 (1L<<3)
+#define BNX2_MISC_SM_ASF_CONTROL_PA_TO			 (1L<<4)
+#define BNX2_MISC_SM_ASF_CONTROL_PL_TO			 (1L<<5)
+#define BNX2_MISC_SM_ASF_CONTROL_RT_TO			 (1L<<6)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EVENT		 (1L<<7)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_EN		 (1L<<8)
+#define BNX2_MISC_SM_ASF_CONTROL_STRETCH_PULSE		 (1L<<9)
+#define BNX2_MISC_SM_ASF_CONTROL_RES			 (0x3L<<10)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EN			 (1L<<12)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_BB_EN		 (1L<<13)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_NO_ADDR_FILT	 (1L<<14)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_AUTOREAD		 (1L<<15)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR1		 (0x7fL<<16)
+#define BNX2_MISC_SM_ASF_CONTROL_NIC_SMB_ADDR2		 (0x7fL<<23)
+#define BNX2_MISC_SM_ASF_CONTROL_EN_NIC_SMB_ADDR_0	 (1L<<30)
+#define BNX2_MISC_SM_ASF_CONTROL_SMB_EARLY_ATTN		 (1L<<31)
+
+#define BNX2_MISC_SMB_IN				0x00000884
+#define BNX2_MISC_SMB_IN_DAT_IN				 (0xffL<<0)
+#define BNX2_MISC_SMB_IN_RDY				 (1L<<8)
+#define BNX2_MISC_SMB_IN_DONE				 (1L<<9)
+#define BNX2_MISC_SMB_IN_FIRSTBYTE			 (1L<<10)
+#define BNX2_MISC_SMB_IN_STATUS				 (0x7L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OK			 (0x0L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_PEC			 (0x1L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_OFLOW			 (0x2L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_STOP			 (0x3L<<11)
+#define BNX2_MISC_SMB_IN_STATUS_TIMEOUT			 (0x4L<<11)
+
+#define BNX2_MISC_SMB_OUT				0x00000888
+#define BNX2_MISC_SMB_OUT_DAT_OUT			 (0xffL<<0)
+#define BNX2_MISC_SMB_OUT_RDY				 (1L<<8)
+#define BNX2_MISC_SMB_OUT_START				 (1L<<9)
+#define BNX2_MISC_SMB_OUT_LAST				 (1L<<10)
+#define BNX2_MISC_SMB_OUT_ACC_TYPE			 (1L<<11)
+#define BNX2_MISC_SMB_OUT_ENB_PEC			 (1L<<12)
+#define BNX2_MISC_SMB_OUT_GET_RX_LEN			 (1L<<13)
+#define BNX2_MISC_SMB_OUT_SMB_READ_LEN			 (0x3fL<<14)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS		 (0xfL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_OK		 (0L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_NACK	 (1L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_UFLOW		 (2L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_STOP		 (3L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_TIMEOUT	 (4L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_FIRST_LOST	 (5L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_BADACK		 (6L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_NACK	 (9L<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_STATUS_SUB_LOST	 (0xdL<<20)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_SLAVEMODE		 (1L<<24)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_EN		 (1L<<25)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_DAT_IN		 (1L<<26)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_EN		 (1L<<27)
+#define BNX2_MISC_SMB_OUT_SMB_OUT_CLK_IN		 (1L<<28)
+
+#define BNX2_MISC_SMB_WATCHDOG				0x0000088c
+#define BNX2_MISC_SMB_WATCHDOG_WATCHDOG			 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_HEARTBEAT				0x00000890
+#define BNX2_MISC_SMB_HEARTBEAT_HEARTBEAT		 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_ASF				0x00000894
+#define BNX2_MISC_SMB_POLL_ASF_POLL_ASF			 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_POLL_LEGACY			0x00000898
+#define BNX2_MISC_SMB_POLL_LEGACY_POLL_LEGACY		 (0xffffL<<0)
+
+#define BNX2_MISC_SMB_RETRAN				0x0000089c
+#define BNX2_MISC_SMB_RETRAN_RETRAN			 (0xffL<<0)
+
+#define BNX2_MISC_SMB_TIMESTAMP				0x000008a0
+#define BNX2_MISC_SMB_TIMESTAMP_TIMESTAMP		 (0xffffffffL<<0)
+
+#define BNX2_MISC_PERR_ENA0				0x000008a4
+#define BNX2_MISC_PERR_ENA0_COM_MISC_CTXC		 (1L<<0)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_REGF		 (1L<<1)
+#define BNX2_MISC_PERR_ENA0_COM_MISC_SCPAD		 (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_CTXC		 (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_REGF		 (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CP_MISC_SCPAD		 (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CS_MISC_TMEM		 (1L<<6)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM0		 (1L<<7)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM1		 (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM2		 (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM3		 (1L<<10)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM4		 (1L<<11)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_ACCM5		 (1L<<12)
+#define BNX2_MISC_PERR_ENA0_CTX_MISC_PGTBL		 (1L<<13)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR0		 (1L<<14)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR1		 (1L<<15)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR2		 (1L<<16)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR3		 (1L<<17)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DR4		 (1L<<18)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW0		 (1L<<19)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW1		 (1L<<20)
+#define BNX2_MISC_PERR_ENA0_DMAE_MISC_DW2		 (1L<<21)
+#define BNX2_MISC_PERR_ENA0_HC_MISC_DMA			 (1L<<22)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_REGF		 (1L<<23)
+#define BNX2_MISC_PERR_ENA0_MCP_MISC_SCPAD		 (1L<<24)
+#define BNX2_MISC_PERR_ENA0_MQ_MISC_CTX			 (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RBDC_MISC			 (1L<<26)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_MB		 (1L<<27)
+#define BNX2_MISC_PERR_ENA0_RBUF_MISC_PTR		 (1L<<28)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPC		 (1L<<29)
+#define BNX2_MISC_PERR_ENA0_RDE_MISC_RPM		 (1L<<30)
+#define BNX2_MISC_PERR_ENA0_RV2P_MISC_CB0REGS		 (1L<<31)
+#define BNX2_MISC_PERR_ENA0_COM_DMAE_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA0_CP_DMAE_PERR_EN_XI		 (1L<<1)
+#define BNX2_MISC_PERR_ENA0_RPM_ACPIBEMEM_PERR_EN_XI	 (1L<<2)
+#define BNX2_MISC_PERR_ENA0_CTX_USAGE_CNT_PERR_EN_XI	 (1L<<3)
+#define BNX2_MISC_PERR_ENA0_CTX_PGTBL_PERR_EN_XI	 (1L<<4)
+#define BNX2_MISC_PERR_ENA0_CTX_CACHE_PERR_EN_XI	 (1L<<5)
+#define BNX2_MISC_PERR_ENA0_CTX_MIRROR_PERR_EN_XI	 (1L<<6)
+#define BNX2_MISC_PERR_ENA0_COM_CTXC_PERR_EN_XI		 (1L<<7)
+#define BNX2_MISC_PERR_ENA0_COM_SCPAD_PERR_EN_XI	 (1L<<8)
+#define BNX2_MISC_PERR_ENA0_CP_CTXC_PERR_EN_XI		 (1L<<9)
+#define BNX2_MISC_PERR_ENA0_CP_SCPAD_PERR_EN_XI		 (1L<<10)
+#define BNX2_MISC_PERR_ENA0_RXP_RBUFC_PERR_EN_XI	 (1L<<11)
+#define BNX2_MISC_PERR_ENA0_RXP_CTXC_PERR_EN_XI		 (1L<<12)
+#define BNX2_MISC_PERR_ENA0_RXP_SCPAD_PERR_EN_XI	 (1L<<13)
+#define BNX2_MISC_PERR_ENA0_TPAT_SCPAD_PERR_EN_XI	 (1L<<14)
+#define BNX2_MISC_PERR_ENA0_TXP_CTXC_PERR_EN_XI		 (1L<<15)
+#define BNX2_MISC_PERR_ENA0_TXP_SCPAD_PERR_EN_XI	 (1L<<16)
+#define BNX2_MISC_PERR_ENA0_CS_TMEM_PERR_EN_XI		 (1L<<17)
+#define BNX2_MISC_PERR_ENA0_MQ_CTX_PERR_EN_XI		 (1L<<18)
+#define BNX2_MISC_PERR_ENA0_RPM_DFIFOMEM_PERR_EN_XI	 (1L<<19)
+#define BNX2_MISC_PERR_ENA0_RPC_DFIFOMEM_PERR_EN_XI	 (1L<<20)
+#define BNX2_MISC_PERR_ENA0_RBUF_PTRMEM_PERR_EN_XI	 (1L<<21)
+#define BNX2_MISC_PERR_ENA0_RBUF_DATAMEM_PERR_EN_XI	 (1L<<22)
+#define BNX2_MISC_PERR_ENA0_RV2P_P2IRAM_PERR_EN_XI	 (1L<<23)
+#define BNX2_MISC_PERR_ENA0_RV2P_P1IRAM_PERR_EN_XI	 (1L<<24)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB1REGS_PERR_EN_XI	 (1L<<25)
+#define BNX2_MISC_PERR_ENA0_RV2P_CB0REGS_PERR_EN_XI	 (1L<<26)
+#define BNX2_MISC_PERR_ENA0_TPBUF_PERR_EN_XI		 (1L<<27)
+#define BNX2_MISC_PERR_ENA0_THBUF_PERR_EN_XI		 (1L<<28)
+#define BNX2_MISC_PERR_ENA0_TDMA_PERR_EN_XI		 (1L<<29)
+#define BNX2_MISC_PERR_ENA0_TBDC_PERR_EN_XI		 (1L<<30)
+#define BNX2_MISC_PERR_ENA0_TSCH_LR_PERR_EN_XI		 (1L<<31)
+
+#define BNX2_MISC_PERR_ENA1				0x000008a8
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_CB1REGS		 (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P1IRAM		 (1L<<1)
+#define BNX2_MISC_PERR_ENA1_RV2P_MISC_P2IRAM		 (1L<<2)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_CTXC		 (1L<<3)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_REGF		 (1L<<4)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_SCPAD		 (1L<<5)
+#define BNX2_MISC_PERR_ENA1_RXP_MISC_RBUFC		 (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TBDC_MISC			 (1L<<7)
+#define BNX2_MISC_PERR_ENA1_TDMA_MISC			 (1L<<8)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB0		 (1L<<9)
+#define BNX2_MISC_PERR_ENA1_THBUF_MISC_MB1		 (1L<<10)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_REGF		 (1L<<11)
+#define BNX2_MISC_PERR_ENA1_TPAT_MISC_SCPAD		 (1L<<12)
+#define BNX2_MISC_PERR_ENA1_TPBUF_MISC_MB		 (1L<<13)
+#define BNX2_MISC_PERR_ENA1_TSCH_MISC_LR		 (1L<<14)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_CTXC		 (1L<<15)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_REGF		 (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TXP_MISC_SCPAD		 (1L<<17)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIORX		 (1L<<18)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_FIOTX		 (1L<<19)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_RX			 (1L<<20)
+#define BNX2_MISC_PERR_ENA1_UMP_MISC_TX			 (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_MISC			 (1L<<22)
+#define BNX2_MISC_PERR_ENA1_CSQ_MISC			 (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_MISC			 (1L<<24)
+#define BNX2_MISC_PERR_ENA1_MCPQ_MISC			 (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_MISC			 (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_MISC			 (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_MISC			 (1L<<28)
+#define BNX2_MISC_PERR_ENA1_RXPQ_MISC			 (1L<<29)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_MISC			 (1L<<30)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_MISC			 (1L<<31)
+#define BNX2_MISC_PERR_ENA1_RBDC_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA1_RDMA_DFIFO_PERR_EN_XI	 (1L<<2)
+#define BNX2_MISC_PERR_ENA1_HC_STATS_PERR_EN_XI		 (1L<<3)
+#define BNX2_MISC_PERR_ENA1_HC_MSIX_PERR_EN_XI		 (1L<<4)
+#define BNX2_MISC_PERR_ENA1_HC_PRODUCSTB_PERR_EN_XI	 (1L<<5)
+#define BNX2_MISC_PERR_ENA1_HC_CONSUMSTB_PERR_EN_XI	 (1L<<6)
+#define BNX2_MISC_PERR_ENA1_TPATQ_PERR_EN_XI		 (1L<<7)
+#define BNX2_MISC_PERR_ENA1_MCPQ_PERR_EN_XI		 (1L<<8)
+#define BNX2_MISC_PERR_ENA1_TDMAQ_PERR_EN_XI		 (1L<<9)
+#define BNX2_MISC_PERR_ENA1_TXPQ_PERR_EN_XI		 (1L<<10)
+#define BNX2_MISC_PERR_ENA1_COMTQ_PERR_EN_XI		 (1L<<11)
+#define BNX2_MISC_PERR_ENA1_COMQ_PERR_EN_XI		 (1L<<12)
+#define BNX2_MISC_PERR_ENA1_RLUPQ_PERR_EN_XI		 (1L<<13)
+#define BNX2_MISC_PERR_ENA1_RXPQ_PERR_EN_XI		 (1L<<14)
+#define BNX2_MISC_PERR_ENA1_RV2PPQ_PERR_EN_XI		 (1L<<15)
+#define BNX2_MISC_PERR_ENA1_RDMAQ_PERR_EN_XI		 (1L<<16)
+#define BNX2_MISC_PERR_ENA1_TASQ_PERR_EN_XI		 (1L<<17)
+#define BNX2_MISC_PERR_ENA1_TBDRQ_PERR_EN_XI		 (1L<<18)
+#define BNX2_MISC_PERR_ENA1_TSCHQ_PERR_EN_XI		 (1L<<19)
+#define BNX2_MISC_PERR_ENA1_COMXQ_PERR_EN_XI		 (1L<<20)
+#define BNX2_MISC_PERR_ENA1_RXPCQ_PERR_EN_XI		 (1L<<21)
+#define BNX2_MISC_PERR_ENA1_RV2PTQ_PERR_EN_XI		 (1L<<22)
+#define BNX2_MISC_PERR_ENA1_RV2PMQ_PERR_EN_XI		 (1L<<23)
+#define BNX2_MISC_PERR_ENA1_CPQ_PERR_EN_XI		 (1L<<24)
+#define BNX2_MISC_PERR_ENA1_CSQ_PERR_EN_XI		 (1L<<25)
+#define BNX2_MISC_PERR_ENA1_RLUP_CID_PERR_EN_XI		 (1L<<26)
+#define BNX2_MISC_PERR_ENA1_RV2PCS_TMEM_PERR_EN_XI	 (1L<<27)
+#define BNX2_MISC_PERR_ENA1_RV2PCSQ_PERR_EN_XI		 (1L<<28)
+#define BNX2_MISC_PERR_ENA1_MQ_IDX_PERR_EN_XI		 (1L<<29)
+
+#define BNX2_MISC_PERR_ENA2				0x000008ac
+#define BNX2_MISC_PERR_ENA2_COMQ_MISC			 (1L<<0)
+#define BNX2_MISC_PERR_ENA2_COMXQ_MISC			 (1L<<1)
+#define BNX2_MISC_PERR_ENA2_COMTQ_MISC			 (1L<<2)
+#define BNX2_MISC_PERR_ENA2_TSCHQ_MISC			 (1L<<3)
+#define BNX2_MISC_PERR_ENA2_TBDRQ_MISC			 (1L<<4)
+#define BNX2_MISC_PERR_ENA2_TXPQ_MISC			 (1L<<5)
+#define BNX2_MISC_PERR_ENA2_TDMAQ_MISC			 (1L<<6)
+#define BNX2_MISC_PERR_ENA2_TPATQ_MISC			 (1L<<7)
+#define BNX2_MISC_PERR_ENA2_TASQ_MISC			 (1L<<8)
+#define BNX2_MISC_PERR_ENA2_TGT_FIFO_PERR_EN_XI		 (1L<<0)
+#define BNX2_MISC_PERR_ENA2_UMP_TX_PERR_EN_XI		 (1L<<1)
+#define BNX2_MISC_PERR_ENA2_UMP_RX_PERR_EN_XI		 (1L<<2)
+#define BNX2_MISC_PERR_ENA2_MCP_ROM_PERR_EN_XI		 (1L<<3)
+#define BNX2_MISC_PERR_ENA2_MCP_SCPAD_PERR_EN_XI	 (1L<<4)
+#define BNX2_MISC_PERR_ENA2_HB_MEM_PERR_EN_XI		 (1L<<5)
+#define BNX2_MISC_PERR_ENA2_PCIE_REPLAY_PERR_EN_XI	 (1L<<6)
+
+#define BNX2_MISC_DEBUG_VECTOR_SEL			0x000008b0
+#define BNX2_MISC_DEBUG_VECTOR_SEL_0			 (0xfffL<<0)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1			 (0xfffL<<12)
+#define BNX2_MISC_DEBUG_VECTOR_SEL_1_XI			 (0xfffL<<15)
+
+#define BNX2_MISC_VREG_CONTROL				0x000008b4
+#define BNX2_MISC_VREG_CONTROL_1_2			 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_XI		 (0xfL<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS14_XI	 (0L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS12_XI	 (1L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS10_XI	 (2L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS8_XI	 (3L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS6_XI	 (4L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS4_XI	 (5L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_PLUS2_XI	 (6L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_NOM_XI		 (7L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS2_XI	 (8L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS4_XI	 (9L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS6_XI	 (10L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS8_XI	 (11L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS10_XI	 (12L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS12_XI	 (13L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS14_XI	 (14L<<0)
+#define BNX2_MISC_VREG_CONTROL_1_0_MAIN_MINUS16_XI	 (15L<<0)
+#define BNX2_MISC_VREG_CONTROL_2_5			 (0xfL<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS14		 (0L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS12		 (1L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS10		 (2L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS8		 (3L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS6		 (4L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS4		 (5L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_PLUS2		 (6L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_NOM			 (7L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS2		 (8L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS4		 (9L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS6		 (10L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS8		 (11L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS10		 (12L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS12		 (13L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS14		 (14L<<4)
+#define BNX2_MISC_VREG_CONTROL_2_5_MINUS16		 (15L<<4)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT			 (0xfL<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS14		 (0L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS12		 (1L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS10		 (2L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS8		 (3L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS6		 (4L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS4		 (5L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_PLUS2		 (6L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_NOM		 (7L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS2		 (8L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS4		 (9L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS6		 (10L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS8		 (11L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS10		 (12L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS12		 (13L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS14		 (14L<<8)
+#define BNX2_MISC_VREG_CONTROL_1_0_MGMT_MINUS16		 (15L<<8)
+
+#define BNX2_MISC_FINAL_CLK_CTL_VAL			0x000008b8
+#define BNX2_MISC_FINAL_CLK_CTL_VAL_MISC_FINAL_CLK_CTL_VAL	 (0x3ffffffL<<6)
+
+#define BNX2_MISC_GP_HW_CTL0				0x000008bc
+#define BNX2_MISC_GP_HW_CTL0_TX_DRIVE			 (1L<<0)
+#define BNX2_MISC_GP_HW_CTL0_RMII_MODE			 (1L<<1)
+#define BNX2_MISC_GP_HW_CTL0_RMII_CRSDV_SEL		 (1L<<2)
+#define BNX2_MISC_GP_HW_CTL0_RVMII_MODE			 (1L<<3)
+#define BNX2_MISC_GP_HW_CTL0_FLASH_SAMP_SCLK_NEGEDGE_TE	 (1L<<4)
+#define BNX2_MISC_GP_HW_CTL0_HIDDEN_REVISION_ID_TE	 (1L<<5)
+#define BNX2_MISC_GP_HW_CTL0_HC_CNTL_TMOUT_CTR_RST_TE	 (1L<<6)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED1_XI		 (0x7L<<4)
+#define BNX2_MISC_GP_HW_CTL0_ENA_CORE_RST_ON_MAIN_PWR_GOING_AWAY	 (1L<<7)
+#define BNX2_MISC_GP_HW_CTL0_ENA_SEL_VAUX_B_IN_L2_TE	 (1L<<8)
+#define BNX2_MISC_GP_HW_CTL0_GRC_BNK_FREE_FIX_TE	 (1L<<9)
+#define BNX2_MISC_GP_HW_CTL0_LED_ACT_SEL_TE		 (1L<<10)
+#define BNX2_MISC_GP_HW_CTL0_RESERVED2_XI		 (0x7L<<8)
+#define BNX2_MISC_GP_HW_CTL0_UP1_DEF0			 (1L<<11)
+#define BNX2_MISC_GP_HW_CTL0_FIBER_MODE_DIS_DEF		 (1L<<12)
+#define BNX2_MISC_GP_HW_CTL0_FORCE2500_DEF		 (1L<<13)
+#define BNX2_MISC_GP_HW_CTL0_AUTODETECT_DIS_DEF		 (1L<<14)
+#define BNX2_MISC_GP_HW_CTL0_PARALLEL_DETECT_DEF	 (1L<<15)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI		 (0xfL<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_3MA		 (0L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P5MA		 (1L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_2P0MA		 (3L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P5MA		 (5L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_1P0MA		 (7L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_DAI_PWRDN		 (15L<<16)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE2DIS		 (1L<<20)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PRE1DIS		 (1L<<21)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT		 (0x3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M6P		 (0L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_M0P		 (1L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P0P		 (2L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_CTAT_P6P		 (3L<<22)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT		 (0x3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M6P		 (0L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_M0P		 (1L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P0P		 (2L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_PTAT_P6P		 (3L<<24)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ		 (0x3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_240UA	 (0L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_160UA	 (1L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_400UA	 (2L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_IAMP_ADJ_320UA	 (3L<<26)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ		 (0x3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_240UA	 (0L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_160UA	 (1L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_400UA	 (2L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_ICBUF_ADJ_320UA	 (3L<<28)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ		 (0x3L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P57	 (0L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P45	 (1L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P62	 (2L<<30)
+#define BNX2_MISC_GP_HW_CTL0_OSCCTRL_XTAL_ADJ_1P66	 (3L<<30)
+
+#define BNX2_MISC_GP_HW_CTL1				0x000008c0
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_BTN_PRSNT_TE	 (1L<<0)
+#define BNX2_MISC_GP_HW_CTL1_1_ATTN_IND_PRSNT_TE	 (1L<<1)
+#define BNX2_MISC_GP_HW_CTL1_1_PWR_IND_PRSNT_TE		 (1L<<2)
+#define BNX2_MISC_GP_HW_CTL1_0_PCIE_LOOPBACK_TE		 (1L<<3)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_SOFT_XI		 (0xffffL<<0)
+#define BNX2_MISC_GP_HW_CTL1_RESERVED_HARD_XI		 (0xffffL<<16)
+
+#define BNX2_MISC_NEW_HW_CTL				0x000008c4
+#define BNX2_MISC_NEW_HW_CTL_MAIN_POR_BYPASS		 (1L<<0)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_ENABLE		 (1L<<1)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL0		 (1L<<2)
+#define BNX2_MISC_NEW_HW_CTL_RINGOSC_SEL1		 (1L<<3)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SHARED		 (0xfffL<<4)
+#define BNX2_MISC_NEW_HW_CTL_RESERVED_SPLIT		 (0xffffL<<16)
+
+#define BNX2_MISC_NEW_CORE_CTL				0x000008c8
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_SUCCESS	 (1L<<0)
+#define BNX2_MISC_NEW_CORE_CTL_LINK_HOLDOFF_REQ		 (1L<<1)
+#define BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE		 (1L<<16)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_CMN		 (0x3fffL<<2)
+#define BNX2_MISC_NEW_CORE_CTL_RESERVED_TC		 (0xffffL<<16)
+
+#define BNX2_MISC_ECO_HW_CTL				0x000008cc
+#define BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN		 (1L<<0)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_SOFT		 (0x7fffL<<1)
+#define BNX2_MISC_ECO_HW_CTL_RESERVED_HARD		 (0xffffL<<16)
+
+#define BNX2_MISC_ECO_CORE_CTL				0x000008d0
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_SOFT		 (0xffffL<<0)
+#define BNX2_MISC_ECO_CORE_CTL_RESERVED_HARD		 (0xffffL<<16)
+
+#define BNX2_MISC_PPIO					0x000008d4
+#define BNX2_MISC_PPIO_VALUE				 (0xfL<<0)
+#define BNX2_MISC_PPIO_SET				 (0xfL<<8)
+#define BNX2_MISC_PPIO_CLR				 (0xfL<<16)
+#define BNX2_MISC_PPIO_FLOAT				 (0xfL<<24)
+
+#define BNX2_MISC_PPIO_INT				0x000008d8
+#define BNX2_MISC_PPIO_INT_INT_STATE			 (0xfL<<0)
+#define BNX2_MISC_PPIO_INT_OLD_VALUE			 (0xfL<<8)
+#define BNX2_MISC_PPIO_INT_OLD_SET			 (0xfL<<16)
+#define BNX2_MISC_PPIO_INT_OLD_CLR			 (0xfL<<24)
+
+#define BNX2_MISC_RESET_NUMS				0x000008dc
+#define BNX2_MISC_RESET_NUMS_NUM_HARD_RESETS		 (0x7L<<0)
+#define BNX2_MISC_RESET_NUMS_NUM_PCIE_RESETS		 (0x7L<<4)
+#define BNX2_MISC_RESET_NUMS_NUM_PERSTB_RESETS		 (0x7L<<8)
+#define BNX2_MISC_RESET_NUMS_NUM_CMN_RESETS		 (0x7L<<12)
+#define BNX2_MISC_RESET_NUMS_NUM_PORT_RESETS		 (0x7L<<16)
+
+#define BNX2_MISC_CS16_ERR				0x000008e0
+#define BNX2_MISC_CS16_ERR_ENA_PCI			 (1L<<0)
+#define BNX2_MISC_CS16_ERR_ENA_RDMA			 (1L<<1)
+#define BNX2_MISC_CS16_ERR_ENA_TDMA			 (1L<<2)
+#define BNX2_MISC_CS16_ERR_ENA_EMAC			 (1L<<3)
+#define BNX2_MISC_CS16_ERR_ENA_CTX			 (1L<<4)
+#define BNX2_MISC_CS16_ERR_ENA_TBDR			 (1L<<5)
+#define BNX2_MISC_CS16_ERR_ENA_RBDC			 (1L<<6)
+#define BNX2_MISC_CS16_ERR_ENA_COM			 (1L<<7)
+#define BNX2_MISC_CS16_ERR_ENA_CP			 (1L<<8)
+#define BNX2_MISC_CS16_ERR_STA_PCI			 (1L<<16)
+#define BNX2_MISC_CS16_ERR_STA_RDMA			 (1L<<17)
+#define BNX2_MISC_CS16_ERR_STA_TDMA			 (1L<<18)
+#define BNX2_MISC_CS16_ERR_STA_EMAC			 (1L<<19)
+#define BNX2_MISC_CS16_ERR_STA_CTX			 (1L<<20)
+#define BNX2_MISC_CS16_ERR_STA_TBDR			 (1L<<21)
+#define BNX2_MISC_CS16_ERR_STA_RBDC			 (1L<<22)
+#define BNX2_MISC_CS16_ERR_STA_COM			 (1L<<23)
+#define BNX2_MISC_CS16_ERR_STA_CP			 (1L<<24)
+
+#define BNX2_MISC_SPIO_EVENT				0x000008e4
+#define BNX2_MISC_SPIO_EVENT_ENABLE			 (0xffL<<0)
+
+#define BNX2_MISC_PPIO_EVENT				0x000008e8
+#define BNX2_MISC_PPIO_EVENT_ENABLE			 (0xfL<<0)
+
+#define BNX2_MISC_DUAL_MEDIA_CTRL			0x000008ec
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID		 (0xffL<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_X		 (0L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C		 (3L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S		 (12L<<0)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP	 (0x7L<<8)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP_PIN		 (1L<<11)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_SIGDET	 (1L<<12)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_SIGDET	 (1L<<13)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_SIGDET		 (1L<<14)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_SIGDET		 (1L<<15)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_LCPLL_RST		 (1L<<16)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES1_RST		 (1L<<17)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_SERDES0_RST		 (1L<<18)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY1_RST		 (1L<<19)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY0_RST		 (1L<<20)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL		 (0x7L<<21)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PORT_SWAP		 (1L<<24)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE	 (1L<<25)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ	 (0xfL<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER1_IDDQ	 (1L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_SER0_IDDQ	 (2L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY1_IDDQ	 (4L<<26)
+#define BNX2_MISC_DUAL_MEDIA_CTRL_PHY_SERDES_IDDQ_PHY0_IDDQ	 (8L<<26)
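
[Editor's note, not part of the patch: on dual-media parts the driver reads this register once to decide whether a port is copper or SerDes — the bond ID settles the C and S bondings outright, and otherwise the PHY strap (or its software override) selects the medium. A sketch of that decode, loosely following bnx2.c; struct bnx2, REG_RD() and the driver's BNX2_PHY_FLAG_SERDES flag are assumed:]

	/* Sketch: choose copper vs. SerDes from the bond ID and PHY straps. */
	static void bnx2_get_5709_media(struct bnx2 *bp)
	{
		u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
		u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
		u32 strap;

		if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
			return;				/* copper bonding */
		if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;				/* SerDes bonding */
		}

		if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
			strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
		else
			strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
		/* strap then indexes the driver's per-port media table. */
	}
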
+
+#define BNX2_MISC_OTP_CMD1				0x000008f0
+#define BNX2_MISC_OTP_CMD1_FMODE			 (0x7L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_IDLE			 (0L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_WRITE			 (1L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_INIT			 (2L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_SET			 (3L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RST			 (4L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_VERIFY			 (5L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED0		 (6L<<0)
+#define BNX2_MISC_OTP_CMD1_FMODE_RESERVED1		 (7L<<0)
+#define BNX2_MISC_OTP_CMD1_USEPINS			 (1L<<8)
+#define BNX2_MISC_OTP_CMD1_PROGSEL			 (1L<<9)
+#define BNX2_MISC_OTP_CMD1_PROGSTART			 (1L<<10)
+#define BNX2_MISC_OTP_CMD1_PCOUNT			 (0x7L<<16)
+#define BNX2_MISC_OTP_CMD1_PBYP				 (1L<<19)
+#define BNX2_MISC_OTP_CMD1_VSEL				 (0xfL<<20)
+#define BNX2_MISC_OTP_CMD1_TM				 (0x7L<<27)
+#define BNX2_MISC_OTP_CMD1_SADBYP			 (1L<<30)
+#define BNX2_MISC_OTP_CMD1_DEBUG			 (1L<<31)
+
+#define BNX2_MISC_OTP_CMD2				0x000008f4
+#define BNX2_MISC_OTP_CMD2_OTP_ROM_ADDR			 (0x3ffL<<0)
+#define BNX2_MISC_OTP_CMD2_DOSEL			 (0x7fL<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_0			 (0L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_1			 (1L<<16)
+#define BNX2_MISC_OTP_CMD2_DOSEL_127			 (127L<<16)
+
+#define BNX2_MISC_OTP_STATUS				0x000008f8
+#define BNX2_MISC_OTP_STATUS_DATA			 (0xffL<<0)
+#define BNX2_MISC_OTP_STATUS_VALID			 (1L<<8)
+#define BNX2_MISC_OTP_STATUS_BUSY			 (1L<<9)
+#define BNX2_MISC_OTP_STATUS_BUSYSM			 (1L<<10)
+#define BNX2_MISC_OTP_STATUS_DONE			 (1L<<11)
+
+#define BNX2_MISC_OTP_SHIFT1_CMD			0x000008fc
+#define BNX2_MISC_OTP_SHIFT1_CMD_RESET_MODE_N		 (1L<<0)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_START		 (1L<<2)
+#define BNX2_MISC_OTP_SHIFT1_CMD_LOAD_DATA		 (1L<<3)
+#define BNX2_MISC_OTP_SHIFT1_CMD_SHIFT_SELECT		 (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT1_DATA			0x00000900
+#define BNX2_MISC_OTP_SHIFT2_CMD			0x00000904
+#define BNX2_MISC_OTP_SHIFT2_CMD_RESET_MODE_N		 (1L<<0)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_START		 (1L<<2)
+#define BNX2_MISC_OTP_SHIFT2_CMD_LOAD_DATA		 (1L<<3)
+#define BNX2_MISC_OTP_SHIFT2_CMD_SHIFT_SELECT		 (0x1fL<<8)
+
+#define BNX2_MISC_OTP_SHIFT2_DATA			0x00000908
+#define BNX2_MISC_BIST_CS0				0x0000090c
+#define BNX2_MISC_BIST_CS0_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS0_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS0_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS0_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS0_MBIST_GO			 (1L<<9)
+#define BNX2_MISC_BIST_CS0_BIST_OVERRIDE		 (1L<<31)
+
+#define BNX2_MISC_BIST_MEMSTATUS0			0x00000910
+#define BNX2_MISC_BIST_CS1				0x00000914
+#define BNX2_MISC_BIST_CS1_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS1_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS1_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS1_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS1_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS1			0x00000918
+#define BNX2_MISC_BIST_CS2				0x0000091c
+#define BNX2_MISC_BIST_CS2_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS2_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS2_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS2_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS2_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS2			0x00000920
+#define BNX2_MISC_BIST_CS3				0x00000924
+#define BNX2_MISC_BIST_CS3_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS3_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS3_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS3_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS3_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS3			0x00000928
+#define BNX2_MISC_BIST_CS4				0x0000092c
+#define BNX2_MISC_BIST_CS4_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS4_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS4_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS4_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS4_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS4			0x00000930
+#define BNX2_MISC_BIST_CS5				0x00000934
+#define BNX2_MISC_BIST_CS5_MBIST_EN			 (1L<<0)
+#define BNX2_MISC_BIST_CS5_BIST_SETUP			 (0x3L<<1)
+#define BNX2_MISC_BIST_CS5_MBIST_ASYNC_RESET		 (1L<<3)
+#define BNX2_MISC_BIST_CS5_MBIST_DONE			 (1L<<8)
+#define BNX2_MISC_BIST_CS5_MBIST_GO			 (1L<<9)
+
+#define BNX2_MISC_BIST_MEMSTATUS5			0x00000938
+#define BNX2_MISC_MEM_TM0				0x0000093c
+#define BNX2_MISC_MEM_TM0_PCIE_REPLAY_TM		 (0xfL<<0)
+#define BNX2_MISC_MEM_TM0_MCP_SCPAD			 (0xfL<<8)
+#define BNX2_MISC_MEM_TM0_UMP_TM			 (0xffL<<16)
+#define BNX2_MISC_MEM_TM0_HB_MEM_TM			 (0xfL<<24)
+
+#define BNX2_MISC_USPLL_CTRL				0x00000940
+#define BNX2_MISC_USPLL_CTRL_PH_DET_DIS			 (1L<<0)
+#define BNX2_MISC_USPLL_CTRL_FREQ_DET_DIS		 (1L<<1)
+#define BNX2_MISC_USPLL_CTRL_LCPX			 (0x3fL<<2)
+#define BNX2_MISC_USPLL_CTRL_RX				 (0x3L<<8)
+#define BNX2_MISC_USPLL_CTRL_VC_EN			 (1L<<10)
+#define BNX2_MISC_USPLL_CTRL_VCO_MG			 (0x3L<<11)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XF			 (0x7L<<13)
+#define BNX2_MISC_USPLL_CTRL_KVCO_XS			 (0x7L<<16)
+#define BNX2_MISC_USPLL_CTRL_TESTD_EN			 (1L<<19)
+#define BNX2_MISC_USPLL_CTRL_TESTD_SEL			 (0x7L<<20)
+#define BNX2_MISC_USPLL_CTRL_TESTA_EN			 (1L<<23)
+#define BNX2_MISC_USPLL_CTRL_TESTA_SEL			 (0x3L<<24)
+#define BNX2_MISC_USPLL_CTRL_ATTEN_FREF			 (1L<<26)
+#define BNX2_MISC_USPLL_CTRL_DIGITAL_RST		 (1L<<27)
+#define BNX2_MISC_USPLL_CTRL_ANALOG_RST			 (1L<<28)
+#define BNX2_MISC_USPLL_CTRL_LOCK			 (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS0				0x00000944
+#define BNX2_MISC_PERR_STATUS0_COM_DMAE_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS0_CP_DMAE_PERR		 (1L<<1)
+#define BNX2_MISC_PERR_STATUS0_RPM_ACPIBEMEM_PERR	 (1L<<2)
+#define BNX2_MISC_PERR_STATUS0_CTX_USAGE_CNT_PERR	 (1L<<3)
+#define BNX2_MISC_PERR_STATUS0_CTX_PGTBL_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS0_CTX_CACHE_PERR		 (1L<<5)
+#define BNX2_MISC_PERR_STATUS0_CTX_MIRROR_PERR		 (1L<<6)
+#define BNX2_MISC_PERR_STATUS0_COM_CTXC_PERR		 (1L<<7)
+#define BNX2_MISC_PERR_STATUS0_COM_SCPAD_PERR		 (1L<<8)
+#define BNX2_MISC_PERR_STATUS0_CP_CTXC_PERR		 (1L<<9)
+#define BNX2_MISC_PERR_STATUS0_CP_SCPAD_PERR		 (1L<<10)
+#define BNX2_MISC_PERR_STATUS0_RXP_RBUFC_PERR		 (1L<<11)
+#define BNX2_MISC_PERR_STATUS0_RXP_CTXC_PERR		 (1L<<12)
+#define BNX2_MISC_PERR_STATUS0_RXP_SCPAD_PERR		 (1L<<13)
+#define BNX2_MISC_PERR_STATUS0_TPAT_SCPAD_PERR		 (1L<<14)
+#define BNX2_MISC_PERR_STATUS0_TXP_CTXC_PERR		 (1L<<15)
+#define BNX2_MISC_PERR_STATUS0_TXP_SCPAD_PERR		 (1L<<16)
+#define BNX2_MISC_PERR_STATUS0_CS_TMEM_PERR		 (1L<<17)
+#define BNX2_MISC_PERR_STATUS0_MQ_CTX_PERR		 (1L<<18)
+#define BNX2_MISC_PERR_STATUS0_RPM_DFIFOMEM_PERR	 (1L<<19)
+#define BNX2_MISC_PERR_STATUS0_RPC_DFIFOMEM_PERR	 (1L<<20)
+#define BNX2_MISC_PERR_STATUS0_RBUF_PTRMEM_PERR		 (1L<<21)
+#define BNX2_MISC_PERR_STATUS0_RBUF_DATAMEM_PERR	 (1L<<22)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P2IRAM_PERR		 (1L<<23)
+#define BNX2_MISC_PERR_STATUS0_RV2P_P1IRAM_PERR		 (1L<<24)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB1REGS_PERR	 (1L<<25)
+#define BNX2_MISC_PERR_STATUS0_RV2P_CB0REGS_PERR	 (1L<<26)
+#define BNX2_MISC_PERR_STATUS0_TPBUF_PERR		 (1L<<27)
+#define BNX2_MISC_PERR_STATUS0_THBUF_PERR		 (1L<<28)
+#define BNX2_MISC_PERR_STATUS0_TDMA_PERR		 (1L<<29)
+#define BNX2_MISC_PERR_STATUS0_TBDC_PERR		 (1L<<30)
+#define BNX2_MISC_PERR_STATUS0_TSCH_LR_PERR		 (1L<<31)
+
+#define BNX2_MISC_PERR_STATUS1				0x00000948
+#define BNX2_MISC_PERR_STATUS1_RBDC_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS1_RDMA_DFIFO_PERR		 (1L<<2)
+#define BNX2_MISC_PERR_STATUS1_HC_STATS_PERR		 (1L<<3)
+#define BNX2_MISC_PERR_STATUS1_HC_MSIX_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS1_HC_PRODUCSTB_PERR	 (1L<<5)
+#define BNX2_MISC_PERR_STATUS1_HC_CONSUMSTB_PERR	 (1L<<6)
+#define BNX2_MISC_PERR_STATUS1_TPATQ_PERR		 (1L<<7)
+#define BNX2_MISC_PERR_STATUS1_MCPQ_PERR		 (1L<<8)
+#define BNX2_MISC_PERR_STATUS1_TDMAQ_PERR		 (1L<<9)
+#define BNX2_MISC_PERR_STATUS1_TXPQ_PERR		 (1L<<10)
+#define BNX2_MISC_PERR_STATUS1_COMTQ_PERR		 (1L<<11)
+#define BNX2_MISC_PERR_STATUS1_COMQ_PERR		 (1L<<12)
+#define BNX2_MISC_PERR_STATUS1_RLUPQ_PERR		 (1L<<13)
+#define BNX2_MISC_PERR_STATUS1_RXPQ_PERR		 (1L<<14)
+#define BNX2_MISC_PERR_STATUS1_RV2PPQ_PERR		 (1L<<15)
+#define BNX2_MISC_PERR_STATUS1_RDMAQ_PERR		 (1L<<16)
+#define BNX2_MISC_PERR_STATUS1_TASQ_PERR		 (1L<<17)
+#define BNX2_MISC_PERR_STATUS1_TBDRQ_PERR		 (1L<<18)
+#define BNX2_MISC_PERR_STATUS1_TSCHQ_PERR		 (1L<<19)
+#define BNX2_MISC_PERR_STATUS1_COMXQ_PERR		 (1L<<20)
+#define BNX2_MISC_PERR_STATUS1_RXPCQ_PERR		 (1L<<21)
+#define BNX2_MISC_PERR_STATUS1_RV2PTQ_PERR		 (1L<<22)
+#define BNX2_MISC_PERR_STATUS1_RV2PMQ_PERR		 (1L<<23)
+#define BNX2_MISC_PERR_STATUS1_CPQ_PERR			 (1L<<24)
+#define BNX2_MISC_PERR_STATUS1_CSQ_PERR			 (1L<<25)
+#define BNX2_MISC_PERR_STATUS1_RLUP_CID_PERR		 (1L<<26)
+#define BNX2_MISC_PERR_STATUS1_RV2PCS_TMEM_PERR		 (1L<<27)
+#define BNX2_MISC_PERR_STATUS1_RV2PCSQ_PERR		 (1L<<28)
+#define BNX2_MISC_PERR_STATUS1_MQ_IDX_PERR		 (1L<<29)
+
+#define BNX2_MISC_PERR_STATUS2				0x0000094c
+#define BNX2_MISC_PERR_STATUS2_TGT_FIFO_PERR		 (1L<<0)
+#define BNX2_MISC_PERR_STATUS2_UMP_TX_PERR		 (1L<<1)
+#define BNX2_MISC_PERR_STATUS2_UMP_RX_PERR		 (1L<<2)
+#define BNX2_MISC_PERR_STATUS2_MCP_ROM_PERR		 (1L<<3)
+#define BNX2_MISC_PERR_STATUS2_MCP_SCPAD_PERR		 (1L<<4)
+#define BNX2_MISC_PERR_STATUS2_HB_MEM_PERR		 (1L<<5)
+#define BNX2_MISC_PERR_STATUS2_PCIE_REPLAY_PERR		 (1L<<6)
+
+#define BNX2_MISC_LCPLL_CTRL0				0x00000950
+#define BNX2_MISC_LCPLL_CTRL0_OAC			 (0x7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_NEGTWENTY		 (0L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_ZERO			 (1L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_TWENTY		 (3L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_OAC_FORTY			 (7L<<0)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL			 (0x7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_360		 (0L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_480		 (1L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_600		 (3L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_ICP_CTRL_720		 (7L<<3)
+#define BNX2_MISC_LCPLL_CTRL0_BIAS_CTRL			 (0x3L<<6)
+#define BNX2_MISC_LCPLL_CTRL0_PLL_OBSERVE		 (0x7L<<8)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL			 (0x3L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_0		 (0L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_1		 (1L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_VTH_CTRL_2		 (2L<<11)
+#define BNX2_MISC_LCPLL_CTRL0_PLLSEQSTART		 (1L<<13)
+#define BNX2_MISC_LCPLL_CTRL0_RESERVED			 (1L<<14)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRETRY_EN		 (1L<<15)
+#define BNX2_MISC_LCPLL_CTRL0_FREQMONITOR_EN		 (1L<<16)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRESTART_EN		 (1L<<17)
+#define BNX2_MISC_LCPLL_CTRL0_FREQDETRETRY_EN		 (1L<<18)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE_EN		 (1L<<19)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFDONE		 (1L<<20)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCEFPASS		 (1L<<21)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE_EN	 (1L<<22)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPDONE		 (1L<<23)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS_EN	 (1L<<24)
+#define BNX2_MISC_LCPLL_CTRL0_PLLFORCECAPPASS		 (1L<<25)
+#define BNX2_MISC_LCPLL_CTRL0_CAPRESTART		 (1L<<26)
+#define BNX2_MISC_LCPLL_CTRL0_CAPSELECTM_EN		 (1L<<27)
+
+#define BNX2_MISC_LCPLL_CTRL1				0x00000954
+#define BNX2_MISC_LCPLL_CTRL1_CAPSELECTM		 (0x1fL<<0)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN_EN	 (1L<<5)
+#define BNX2_MISC_LCPLL_CTRL1_CAPFORCESLOWDOWN		 (1L<<6)
+#define BNX2_MISC_LCPLL_CTRL1_SLOWDN_XOR		 (1L<<7)
+
+#define BNX2_MISC_LCPLL_STATUS				0x00000958
+#define BNX2_MISC_LCPLL_STATUS_FREQDONE_SM		 (1L<<0)
+#define BNX2_MISC_LCPLL_STATUS_FREQPASS_SM		 (1L<<1)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQDONE		 (1L<<2)
+#define BNX2_MISC_LCPLL_STATUS_PLLSEQPASS		 (1L<<3)
+#define BNX2_MISC_LCPLL_STATUS_PLLSTATE			 (0x7L<<4)
+#define BNX2_MISC_LCPLL_STATUS_CAPSTATE			 (0x7L<<7)
+#define BNX2_MISC_LCPLL_STATUS_CAPSELECT		 (0x1fL<<10)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR		 (1L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_0	 (0L<<15)
+#define BNX2_MISC_LCPLL_STATUS_SLOWDN_INDICATOR_1	 (1L<<15)
+
+#define BNX2_MISC_OSCFUNDS_CTRL				0x0000095c
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON		 (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_OFF		 (0L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_FREQ_MON_ON		 (1L<<5)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM		 (0x3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_0		 (0L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_1		 (1L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_2		 (2L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_XTAL_ADJCM_3		 (3L<<6)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ		 (0x3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_0		 (0L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_1		 (1L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_2		 (2L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_ICBUF_ADJ_3		 (3L<<8)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ		 (0x3L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_0		 (0L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_1		 (1L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_2		 (2L<<10)
+#define BNX2_MISC_OSCFUNDS_CTRL_IAMP_ADJ_3		 (3L<<10)
+
+
+/*
+ *  nvm_reg definition
+ *  offset: 0x6400
+ */
+#define BNX2_NVM_COMMAND				0x00006400
+#define BNX2_NVM_COMMAND_RST				 (1L<<0)
+#define BNX2_NVM_COMMAND_DONE				 (1L<<3)
+#define BNX2_NVM_COMMAND_DOIT				 (1L<<4)
+#define BNX2_NVM_COMMAND_WR				 (1L<<5)
+#define BNX2_NVM_COMMAND_ERASE				 (1L<<6)
+#define BNX2_NVM_COMMAND_FIRST				 (1L<<7)
+#define BNX2_NVM_COMMAND_LAST				 (1L<<8)
+#define BNX2_NVM_COMMAND_WREN				 (1L<<16)
+#define BNX2_NVM_COMMAND_WRDI				 (1L<<17)
+#define BNX2_NVM_COMMAND_EWSR				 (1L<<18)
+#define BNX2_NVM_COMMAND_WRSR				 (1L<<19)
+#define BNX2_NVM_COMMAND_RD_ID				 (1L<<20)
+#define BNX2_NVM_COMMAND_RD_STATUS			 (1L<<21)
+#define BNX2_NVM_COMMAND_MODE_256			 (1L<<22)
+
+#define BNX2_NVM_STATUS					0x00006404
+#define BNX2_NVM_STATUS_PI_FSM_STATE			 (0xfL<<0)
+#define BNX2_NVM_STATUS_EE_FSM_STATE			 (0xfL<<4)
+#define BNX2_NVM_STATUS_EQ_FSM_STATE			 (0xfL<<8)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_XI		 (0x1fL<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_IDLE_XI	 (0L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD0_XI	 (1L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD1_XI	 (2L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH0_XI	 (3L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CMD_FINISH1_XI	 (4L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ADDR0_XI	 (5L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA0_XI	 (6L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA1_XI	 (7L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WRITE_DATA2_XI	 (8L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA0_XI	 (9L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA1_XI	 (10L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_DATA2_XI	 (11L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID0_XI	 (12L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID1_XI	 (13L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID2_XI	 (14L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID3_XI	 (15L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_READ_STATUS_RDID4_XI	 (16L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_CHECK_BUSY0_XI	 (17L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_ST_WREN_XI	 (18L<<0)
+#define BNX2_NVM_STATUS_SPI_FSM_STATE_SPI_WAIT_XI	 (19L<<0)
+
+#define BNX2_NVM_WRITE					0x00006408
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE			 (0xffffffffL<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_BIT_BANG		 (0L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EECLK		 (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_EEDATA		 (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK		 (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B		 (8L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO		 (16L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI		 (32L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_WRITE_NVM_WRITE_VALUE_SCLK_XI		 (8L<<0)
+
+#define BNX2_NVM_ADDR					0x0000640c
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE			 (0xffffffL<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_BIT_BANG		 (0L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EECLK		 (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_EEDATA		 (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK		 (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B		 (8L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO			 (16L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI			 (32L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_ADDR_NVM_ADDR_VALUE_SCLK_XI		 (8L<<0)
+
+#define BNX2_NVM_READ					0x00006410
+#define BNX2_NVM_READ_NVM_READ_VALUE			 (0xffffffffL<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_BIT_BANG		 (0L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EECLK		 (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_EEDATA		 (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK		 (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B		 (8L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO			 (16L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI			 (32L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SI_XI		 (1L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SO_XI		 (2L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_CS_B_XI		 (4L<<0)
+#define BNX2_NVM_READ_NVM_READ_VALUE_SCLK_XI		 (8L<<0)
+
+#define BNX2_NVM_CFG1					0x00006414
+#define BNX2_NVM_CFG1_FLASH_MODE			 (1L<<0)
+#define BNX2_NVM_CFG1_BUFFER_MODE			 (1L<<1)
+#define BNX2_NVM_CFG1_PASS_MODE				 (1L<<2)
+#define BNX2_NVM_CFG1_BITBANG_MODE			 (1L<<3)
+#define BNX2_NVM_CFG1_STATUS_BIT			 (0x7L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_FLASH_RDY		 (0L<<4)
+#define BNX2_NVM_CFG1_STATUS_BIT_BUFFER_RDY		 (7L<<4)
+#define BNX2_NVM_CFG1_SPI_CLK_DIV			 (0xfL<<7)
+#define BNX2_NVM_CFG1_SEE_CLK_DIV			 (0x7ffL<<11)
+#define BNX2_NVM_CFG1_STRAP_CONTROL_0			 (1L<<23)
+#define BNX2_NVM_CFG1_PROTECT_MODE			 (1L<<24)
+#define BNX2_NVM_CFG1_FLASH_SIZE			 (1L<<25)
+#define BNX2_NVM_CFG1_FW_USTRAP_1			 (1L<<26)
+#define BNX2_NVM_CFG1_FW_USTRAP_0			 (1L<<27)
+#define BNX2_NVM_CFG1_FW_USTRAP_2			 (1L<<28)
+#define BNX2_NVM_CFG1_FW_USTRAP_3			 (1L<<29)
+#define BNX2_NVM_CFG1_FW_FLASH_TYPE_EN			 (1L<<30)
+#define BNX2_NVM_CFG1_COMPAT_BYPASSS			 (1L<<31)
+
+#define BNX2_NVM_CFG2					0x00006418
+#define BNX2_NVM_CFG2_ERASE_CMD				 (0xffL<<0)
+#define BNX2_NVM_CFG2_DUMMY				 (0xffL<<8)
+#define BNX2_NVM_CFG2_STATUS_CMD			 (0xffL<<16)
+#define BNX2_NVM_CFG2_READ_ID				 (0xffL<<24)
+
+#define BNX2_NVM_CFG3					0x0000641c
+#define BNX2_NVM_CFG3_BUFFER_RD_CMD			 (0xffL<<0)
+#define BNX2_NVM_CFG3_WRITE_CMD				 (0xffL<<8)
+#define BNX2_NVM_CFG3_BUFFER_WRITE_CMD			 (0xffL<<16)
+#define BNX2_NVM_CFG3_READ_CMD				 (0xffL<<24)
+
+#define BNX2_NVM_SW_ARB					0x00006420
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET0			 (1L<<0)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET1			 (1L<<1)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET2			 (1L<<2)
+#define BNX2_NVM_SW_ARB_ARB_REQ_SET3			 (1L<<3)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR0			 (1L<<4)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR1			 (1L<<5)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR2			 (1L<<6)
+#define BNX2_NVM_SW_ARB_ARB_REQ_CLR3			 (1L<<7)
+#define BNX2_NVM_SW_ARB_ARB_ARB0			 (1L<<8)
+#define BNX2_NVM_SW_ARB_ARB_ARB1			 (1L<<9)
+#define BNX2_NVM_SW_ARB_ARB_ARB2			 (1L<<10)
+#define BNX2_NVM_SW_ARB_ARB_ARB3			 (1L<<11)
+#define BNX2_NVM_SW_ARB_REQ0				 (1L<<12)
+#define BNX2_NVM_SW_ARB_REQ1				 (1L<<13)
+#define BNX2_NVM_SW_ARB_REQ2				 (1L<<14)
+#define BNX2_NVM_SW_ARB_REQ3				 (1L<<15)
+
+#define BNX2_NVM_ACCESS_ENABLE				0x00006424
+#define BNX2_NVM_ACCESS_ENABLE_EN			 (1L<<0)
+#define BNX2_NVM_ACCESS_ENABLE_WR_EN			 (1L<<1)
+
+#define BNX2_NVM_WRITE1					0x00006428
+#define BNX2_NVM_WRITE1_WREN_CMD			 (0xffL<<0)
+#define BNX2_NVM_WRITE1_WRDI_CMD			 (0xffL<<8)
+#define BNX2_NVM_WRITE1_SR_DATA				 (0xffL<<16)
+
+#define BNX2_NVM_CFG4					0x0000642c
+#define BNX2_NVM_CFG4_FLASH_SIZE			 (0x7L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_1MBIT			 (0L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_2MBIT			 (1L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_4MBIT			 (2L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_8MBIT			 (3L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_16MBIT			 (4L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_32MBIT			 (5L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_64MBIT			 (6L<<0)
+#define BNX2_NVM_CFG4_FLASH_SIZE_128MBIT		 (7L<<0)
+#define BNX2_NVM_CFG4_FLASH_VENDOR			 (1L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ST			 (0L<<3)
+#define BNX2_NVM_CFG4_FLASH_VENDOR_ATMEL		 (1L<<3)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC		 (0x3L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT8	 (0L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT9	 (1L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT10	 (2L<<4)
+#define BNX2_NVM_CFG4_MODE_256_EMPTY_BIT_LOC_BIT11	 (3L<<4)
+#define BNX2_NVM_CFG4_STATUS_BIT_POLARITY		 (1L<<6)
+#define BNX2_NVM_CFG4_RESERVED				 (0x1ffffffL<<7)
+
+#define BNX2_NVM_RECONFIG				0x00006430
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE		 (0xfL<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ST		 (0L<<0)
+#define BNX2_NVM_RECONFIG_ORIG_STRAP_VALUE_ATMEL	 (1L<<0)
+#define BNX2_NVM_RECONFIG_RECONFIG_STRAP_VALUE		 (0xfL<<4)
+#define BNX2_NVM_RECONFIG_RESERVED			 (0x7fffffL<<8)
+#define BNX2_NVM_RECONFIG_RECONFIG_DONE			 (1L<<31)
+
+
+
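/*
 * Usage sketch for the NVM register group above (illustrative only, not
 * part of the patch itself): software takes the flash arbitration by
 * setting a request bit in BNX2_NVM_SW_ARB and polling the matching grant
 * bit.  This assumes readl()/writel() on the device's ioremap()ed register
 * BAR; the helper name and retry bound are made up for the sketch.
 */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_acquire_nvram_arb(void __iomem *regview)
{
	int i;

	/* Request arbitration slot 2. */
	writel(BNX2_NVM_SW_ARB_ARB_REQ_SET2, regview + BNX2_NVM_SW_ARB);

	/* Wait for the hardware to set the matching grant bit. */
	for (i = 0; i < 400; i++) {
		if (readl(regview + BNX2_NVM_SW_ARB) & BNX2_NVM_SW_ARB_ARB_ARB2)
			return 0;
		udelay(5);
	}
	return -EBUSY;
}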
+/*
+ *  dma_reg definition
+ *  offset: 0xc00
+ */
+#define BNX2_DMA_COMMAND				0x00000c00
+#define BNX2_DMA_COMMAND_ENABLE				 (1L<<0)
+
+#define BNX2_DMA_STATUS					0x00000c04
+#define BNX2_DMA_STATUS_PAR_ERROR_STATE			 (1L<<0)
+#define BNX2_DMA_STATUS_READ_TRANSFERS_STAT		 (1L<<16)
+#define BNX2_DMA_STATUS_READ_DELAY_PCI_CLKS_STAT	 (1L<<17)
+#define BNX2_DMA_STATUS_BIG_READ_TRANSFERS_STAT		 (1L<<18)
+#define BNX2_DMA_STATUS_BIG_READ_DELAY_PCI_CLKS_STAT	 (1L<<19)
+#define BNX2_DMA_STATUS_BIG_READ_RETRY_AFTER_DATA_STAT	 (1L<<20)
+#define BNX2_DMA_STATUS_WRITE_TRANSFERS_STAT		 (1L<<21)
+#define BNX2_DMA_STATUS_WRITE_DELAY_PCI_CLKS_STAT	 (1L<<22)
+#define BNX2_DMA_STATUS_BIG_WRITE_TRANSFERS_STAT	 (1L<<23)
+#define BNX2_DMA_STATUS_BIG_WRITE_DELAY_PCI_CLKS_STAT	 (1L<<24)
+#define BNX2_DMA_STATUS_BIG_WRITE_RETRY_AFTER_DATA_STAT	 (1L<<25)
+#define BNX2_DMA_STATUS_GLOBAL_ERR_XI			 (1L<<0)
+#define BNX2_DMA_STATUS_BME_XI				 (1L<<4)
+
+#define BNX2_DMA_CONFIG					0x00000c08
+#define BNX2_DMA_CONFIG_DATA_BYTE_SWAP			 (1L<<0)
+#define BNX2_DMA_CONFIG_DATA_WORD_SWAP			 (1L<<1)
+#define BNX2_DMA_CONFIG_CNTL_BYTE_SWAP			 (1L<<4)
+#define BNX2_DMA_CONFIG_CNTL_WORD_SWAP			 (1L<<5)
+#define BNX2_DMA_CONFIG_ONE_DMA				 (1L<<6)
+#define BNX2_DMA_CONFIG_CNTL_TWO_DMA			 (1L<<7)
+#define BNX2_DMA_CONFIG_CNTL_FPGA_MODE			 (1L<<8)
+#define BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA		 (1L<<10)
+#define BNX2_DMA_CONFIG_CNTL_PCI_COMP_DLY		 (1L<<11)
+#define BNX2_DMA_CONFIG_NO_RCHANS_IN_USE		 (0xfL<<12)
+#define BNX2_DMA_CONFIG_NO_WCHANS_IN_USE		 (0xfL<<16)
+#define BNX2_DMA_CONFIG_PCI_CLK_CMP_BITS		 (0x7L<<20)
+#define BNX2_DMA_CONFIG_PCI_FAST_CLK_CMP		 (1L<<23)
+#define BNX2_DMA_CONFIG_BIG_SIZE			 (0xfL<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_NONE			 (0x0L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_64			 (0x1L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_128			 (0x2L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_256			 (0x4L<<24)
+#define BNX2_DMA_CONFIG_BIG_SIZE_512			 (0x8L<<24)
+#define BNX2_DMA_CONFIG_DAT_WBSWAP_MODE_XI		 (0x3L<<0)
+#define BNX2_DMA_CONFIG_CTL_WBSWAP_MODE_XI		 (0x3L<<4)
+#define BNX2_DMA_CONFIG_MAX_PL_XI			 (0x7L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_128B_XI			 (0L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_256B_XI			 (1L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_512B_XI			 (2L<<12)
+#define BNX2_DMA_CONFIG_MAX_PL_EN_XI			 (1L<<15)
+#define BNX2_DMA_CONFIG_MAX_RRS_XI			 (0x7L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_128B_XI			 (0L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_256B_XI			 (1L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_512B_XI			 (2L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_1024B_XI		 (3L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_2048B_XI		 (4L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_4096B_XI		 (5L<<16)
+#define BNX2_DMA_CONFIG_MAX_RRS_EN_XI			 (1L<<19)
+#define BNX2_DMA_CONFIG_NO_64SWAP_EN_XI			 (1L<<31)
+
+#define BNX2_DMA_BLACKOUT				0x00000c0c
+#define BNX2_DMA_BLACKOUT_RD_RETRY_BLACKOUT		 (0xffL<<0)
+#define BNX2_DMA_BLACKOUT_2ND_RD_RETRY_BLACKOUT		 (0xffL<<8)
+#define BNX2_DMA_BLACKOUT_WR_RETRY_BLACKOUT		 (0xffL<<16)
+
+#define BNX2_DMA_READ_MASTER_SETTING_0			0x00000c10
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PRIORITY	 (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TBDC_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PRIORITY	 (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_0_RBDC_PARAM_EN	 (1L<<15)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_NO_SNOOP	 (1L<<16)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_RELAX_ORDER	 (1L<<17)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PRIORITY	 (1L<<18)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_TRAFFIC_CLASS	 (0x7L<<20)
+#define BNX2_DMA_READ_MASTER_SETTING_0_TDMA_PARAM_EN	 (1L<<23)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_NO_SNOOP	 (1L<<24)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_RELAX_ORDER	 (1L<<25)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PRIORITY	 (1L<<26)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_TRAFFIC_CLASS	 (0x7L<<28)
+#define BNX2_DMA_READ_MASTER_SETTING_0_CTX_PARAM_EN	 (1L<<31)
+
+#define BNX2_DMA_READ_MASTER_SETTING_1			0x00000c14
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PRIORITY	 (1L<<2)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_READ_MASTER_SETTING_1_COM_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PRIORITY	 (1L<<10)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_READ_MASTER_SETTING_1_CP_PARAM_EN	 (1L<<15)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_0			0x00000c18
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PRIORITY	 (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_CS_VLD	 (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_HC_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PRIORITY	 (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_CS_VLD	 (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_RDMA_PARAM_EN	 (1L<<15)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_NO_SNOOP	 (1L<<24)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_RELAX_ORDER	 (1L<<25)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PRIORITY	 (1L<<26)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_CS_VLD	 (1L<<27)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_TRAFFIC_CLASS	 (0x7L<<28)
+#define BNX2_DMA_WRITE_MASTER_SETTING_0_CTX_PARAM_EN	 (1L<<31)
+
+#define BNX2_DMA_WRITE_MASTER_SETTING_1			0x00000c1c
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_NO_SNOOP	 (1L<<0)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_RELAX_ORDER	 (1L<<1)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PRIORITY	 (1L<<2)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_CS_VLD	 (1L<<3)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_TRAFFIC_CLASS	 (0x7L<<4)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_COM_PARAM_EN	 (1L<<7)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_NO_SNOOP	 (1L<<8)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_RELAX_ORDER	 (1L<<9)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PRIORITY	 (1L<<10)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_CS_VLD	 (1L<<11)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_TRAFFIC_CLASS	 (0x7L<<12)
+#define BNX2_DMA_WRITE_MASTER_SETTING_1_CP_PARAM_EN	 (1L<<15)
+
+#define BNX2_DMA_ARBITER				0x00000c20
+#define BNX2_DMA_ARBITER_NUM_READS			 (0x7L<<0)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE			 (1L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_STRICT		 (0L<<4)
+#define BNX2_DMA_ARBITER_WR_ARB_MODE_RND_RBN		 (1L<<4)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE			 (0x3L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_STRICT		 (0L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_RND_RBN		 (1L<<5)
+#define BNX2_DMA_ARBITER_RD_ARB_MODE_WGT_RND_RBN	 (2L<<5)
+#define BNX2_DMA_ARBITER_ALT_MODE_EN			 (1L<<8)
+#define BNX2_DMA_ARBITER_RR_MODE			 (1L<<9)
+#define BNX2_DMA_ARBITER_TIMER_MODE			 (1L<<10)
+#define BNX2_DMA_ARBITER_OUSTD_READ_REQ			 (0xfL<<12)
+
+#define BNX2_DMA_ARB_TIMERS				0x00000c24
+#define BNX2_DMA_ARB_TIMERS_RD_DRR_WAIT_TIME		 (0xffL<<0)
+#define BNX2_DMA_ARB_TIMERS_TM_MIN_TIMEOUT		 (0xffL<<12)
+#define BNX2_DMA_ARB_TIMERS_TM_MAX_TIMEOUT		 (0xfffL<<20)
+
+#define BNX2_DMA_DEBUG_VECT_PEEK			0x00000c2c
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_DMA_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_DMA_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_DMA_TAG_RAM_00				0x00000c30
+#define BNX2_DMA_TAG_RAM_00_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_00_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_00_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_00_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_00_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_00_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_00_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_01				0x00000c34
+#define BNX2_DMA_TAG_RAM_01_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_01_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_01_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_01_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_01_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_01_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_01_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_02				0x00000c38
+#define BNX2_DMA_TAG_RAM_02_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_02_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_02_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_02_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_02_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_02_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_02_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_03				0x00000c3c
+#define BNX2_DMA_TAG_RAM_03_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_03_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_03_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_03_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_03_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_03_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_03_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_04				0x00000c40
+#define BNX2_DMA_TAG_RAM_04_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_04_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_04_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_04_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_04_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_04_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_04_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_05				0x00000c44
+#define BNX2_DMA_TAG_RAM_05_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_05_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_05_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_05_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_05_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_05_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_05_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_06				0x00000c48
+#define BNX2_DMA_TAG_RAM_06_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_06_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_06_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_06_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_06_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_06_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_06_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_07				0x00000c4c
+#define BNX2_DMA_TAG_RAM_07_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_07_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_07_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_07_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_07_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_07_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_07_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_08				0x00000c50
+#define BNX2_DMA_TAG_RAM_08_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_08_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_08_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_08_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_08_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_08_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_08_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_09				0x00000c54
+#define BNX2_DMA_TAG_RAM_09_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_09_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_09_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_09_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_09_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_09_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_09_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_10				0x00000c58
+#define BNX2_DMA_TAG_RAM_10_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_10_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_10_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_10_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_10_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_10_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_10_VALID			 (1L<<10)
+
+#define BNX2_DMA_TAG_RAM_11				0x00000c5c
+#define BNX2_DMA_TAG_RAM_11_CHANNEL			 (0xfL<<0)
+#define BNX2_DMA_TAG_RAM_11_MASTER			 (0x7L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CTX			 (0L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_RBDC			 (1L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TBDC			 (2L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_COM			 (3L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_CP			 (4L<<4)
+#define BNX2_DMA_TAG_RAM_11_MASTER_TDMA			 (5L<<4)
+#define BNX2_DMA_TAG_RAM_11_SWAP			 (0x3L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONFIG			 (0L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_DATA			 (1L<<7)
+#define BNX2_DMA_TAG_RAM_11_SWAP_CONTROL		 (2L<<7)
+#define BNX2_DMA_TAG_RAM_11_FUNCTION			 (1L<<9)
+#define BNX2_DMA_TAG_RAM_11_VALID			 (1L<<10)
+
+#define BNX2_DMA_RCHAN_STAT_22				0x00000c60
+#define BNX2_DMA_RCHAN_STAT_30				0x00000c64
+#define BNX2_DMA_RCHAN_STAT_31				0x00000c68
+#define BNX2_DMA_RCHAN_STAT_32				0x00000c6c
+#define BNX2_DMA_RCHAN_STAT_40				0x00000c70
+#define BNX2_DMA_RCHAN_STAT_41				0x00000c74
+#define BNX2_DMA_RCHAN_STAT_42				0x00000c78
+#define BNX2_DMA_RCHAN_STAT_50				0x00000c7c
+#define BNX2_DMA_RCHAN_STAT_51				0x00000c80
+#define BNX2_DMA_RCHAN_STAT_52				0x00000c84
+#define BNX2_DMA_RCHAN_STAT_60				0x00000c88
+#define BNX2_DMA_RCHAN_STAT_61				0x00000c8c
+#define BNX2_DMA_RCHAN_STAT_62				0x00000c90
+#define BNX2_DMA_RCHAN_STAT_70				0x00000c94
+#define BNX2_DMA_RCHAN_STAT_71				0x00000c98
+#define BNX2_DMA_RCHAN_STAT_72				0x00000c9c
+#define BNX2_DMA_WCHAN_STAT_00				0x00000ca0
+#define BNX2_DMA_WCHAN_STAT_00_WCHAN_STA_HOST_ADDR_LOW	 (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_01				0x00000ca4
+#define BNX2_DMA_WCHAN_STAT_01_WCHAN_STA_HOST_ADDR_HIGH	 (0xffffffffL<<0)
+
+#define BNX2_DMA_WCHAN_STAT_02				0x00000ca8
+#define BNX2_DMA_WCHAN_STAT_02_LENGTH			 (0xffffL<<0)
+#define BNX2_DMA_WCHAN_STAT_02_WORD_SWAP		 (1L<<16)
+#define BNX2_DMA_WCHAN_STAT_02_BYTE_SWAP		 (1L<<17)
+#define BNX2_DMA_WCHAN_STAT_02_PRIORITY_LVL		 (1L<<18)
+
+#define BNX2_DMA_WCHAN_STAT_10				0x00000cac
+#define BNX2_DMA_WCHAN_STAT_11				0x00000cb0
+#define BNX2_DMA_WCHAN_STAT_12				0x00000cb4
+#define BNX2_DMA_WCHAN_STAT_20				0x00000cb8
+#define BNX2_DMA_WCHAN_STAT_21				0x00000cbc
+#define BNX2_DMA_WCHAN_STAT_22				0x00000cc0
+#define BNX2_DMA_WCHAN_STAT_30				0x00000cc4
+#define BNX2_DMA_WCHAN_STAT_31				0x00000cc8
+#define BNX2_DMA_WCHAN_STAT_32				0x00000ccc
+#define BNX2_DMA_WCHAN_STAT_40				0x00000cd0
+#define BNX2_DMA_WCHAN_STAT_41				0x00000cd4
+#define BNX2_DMA_WCHAN_STAT_42				0x00000cd8
+#define BNX2_DMA_WCHAN_STAT_50				0x00000cdc
+#define BNX2_DMA_WCHAN_STAT_51				0x00000ce0
+#define BNX2_DMA_WCHAN_STAT_52				0x00000ce4
+#define BNX2_DMA_WCHAN_STAT_60				0x00000ce8
+#define BNX2_DMA_WCHAN_STAT_61				0x00000cec
+#define BNX2_DMA_WCHAN_STAT_62				0x00000cf0
+#define BNX2_DMA_WCHAN_STAT_70				0x00000cf4
+#define BNX2_DMA_WCHAN_STAT_71				0x00000cf8
+#define BNX2_DMA_WCHAN_STAT_72				0x00000cfc
+#define BNX2_DMA_ARB_STAT_00				0x00000d00
+#define BNX2_DMA_ARB_STAT_00_MASTER			 (0xffffL<<0)
+#define BNX2_DMA_ARB_STAT_00_MASTER_ENC			 (0xffL<<16)
+#define BNX2_DMA_ARB_STAT_00_CUR_BINMSTR		 (0xffL<<24)
+
+#define BNX2_DMA_ARB_STAT_01				0x00000d04
+#define BNX2_DMA_ARB_STAT_01_LPR_RPTR			 (0xfL<<0)
+#define BNX2_DMA_ARB_STAT_01_LPR_WPTR			 (0xfL<<4)
+#define BNX2_DMA_ARB_STAT_01_LPB_RPTR			 (0xfL<<8)
+#define BNX2_DMA_ARB_STAT_01_LPB_WPTR			 (0xfL<<12)
+#define BNX2_DMA_ARB_STAT_01_HPR_RPTR			 (0xfL<<16)
+#define BNX2_DMA_ARB_STAT_01_HPR_WPTR			 (0xfL<<20)
+#define BNX2_DMA_ARB_STAT_01_HPB_RPTR			 (0xfL<<24)
+#define BNX2_DMA_ARB_STAT_01_HPB_WPTR			 (0xfL<<28)
+
+#define BNX2_DMA_FUSE_CTRL0_CMD				0x00000f00
+#define BNX2_DMA_FUSE_CTRL0_CMD_PWRUP_DONE		 (1L<<0)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SHIFT			 (1L<<2)
+#define BNX2_DMA_FUSE_CTRL0_CMD_LOAD			 (1L<<3)
+#define BNX2_DMA_FUSE_CTRL0_CMD_SEL			 (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL0_DATA			0x00000f04
+#define BNX2_DMA_FUSE_CTRL1_CMD				0x00000f08
+#define BNX2_DMA_FUSE_CTRL1_CMD_PWRUP_DONE		 (1L<<0)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SHIFT			 (1L<<2)
+#define BNX2_DMA_FUSE_CTRL1_CMD_LOAD			 (1L<<3)
+#define BNX2_DMA_FUSE_CTRL1_CMD_SEL			 (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL1_DATA			0x00000f0c
+#define BNX2_DMA_FUSE_CTRL2_CMD				0x00000f10
+#define BNX2_DMA_FUSE_CTRL2_CMD_PWRUP_DONE		 (1L<<0)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT_DONE		 (1L<<1)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SHIFT			 (1L<<2)
+#define BNX2_DMA_FUSE_CTRL2_CMD_LOAD			 (1L<<3)
+#define BNX2_DMA_FUSE_CTRL2_CMD_SEL			 (0xfL<<8)
+
+#define BNX2_DMA_FUSE_CTRL2_DATA			0x00000f14
+
+
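/*
 * Usage sketch for the dma_reg group above (illustrative only, not part of
 * the patch itself): the value macros are pre-shifted, so a configuration
 * word is built by OR-ing them together.  Same assumed accessors as the
 * NVM sketch; the channel counts (5 read, 3 write) are illustrative values
 * shifted into the NO_RCHANS_IN_USE/NO_WCHANS_IN_USE fields.
 */
static void example_init_dma_config(void __iomem *regview)
{
	u32 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
		  BNX2_DMA_CONFIG_DATA_WORD_SWAP |
		  BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
		  (5 << 12) |	/* read channels in use */
		  (3 << 16);	/* write channels in use */

	writel(val, regview + BNX2_DMA_CONFIG);
}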
+/*
+ *  context_reg definition
+ *  offset: 0x1000
+ */
+#define BNX2_CTX_COMMAND				0x00001000
+#define BNX2_CTX_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_CTX_COMMAND_DISABLE_USAGE_CNT		 (1L<<1)
+#define BNX2_CTX_COMMAND_DISABLE_PLRU			 (1L<<2)
+#define BNX2_CTX_COMMAND_DISABLE_COMBINE_READ		 (1L<<3)
+#define BNX2_CTX_COMMAND_FLUSH_AHEAD			 (0x1fL<<8)
+#define BNX2_CTX_COMMAND_MEM_INIT			 (1L<<13)
+#define BNX2_CTX_COMMAND_PAGE_SIZE			 (0xfL<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256			 (0L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512			 (1L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1K			 (2L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_2K			 (3L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_4K			 (4L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_8K			 (5L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_16K			 (6L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_32K			 (7L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_64K			 (8L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_128K			 (9L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_256K			 (10L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_512K			 (11L<<16)
+#define BNX2_CTX_COMMAND_PAGE_SIZE_1M			 (12L<<16)
+
+#define BNX2_CTX_STATUS					0x00001004
+#define BNX2_CTX_STATUS_LOCK_WAIT			 (1L<<0)
+#define BNX2_CTX_STATUS_READ_STAT			 (1L<<16)
+#define BNX2_CTX_STATUS_WRITE_STAT			 (1L<<17)
+#define BNX2_CTX_STATUS_ACC_STALL_STAT			 (1L<<18)
+#define BNX2_CTX_STATUS_LOCK_STALL_STAT			 (1L<<19)
+#define BNX2_CTX_STATUS_EXT_READ_STAT			 (1L<<20)
+#define BNX2_CTX_STATUS_EXT_WRITE_STAT			 (1L<<21)
+#define BNX2_CTX_STATUS_MISS_STAT			 (1L<<22)
+#define BNX2_CTX_STATUS_HIT_STAT			 (1L<<23)
+#define BNX2_CTX_STATUS_DEAD_LOCK			 (1L<<24)
+#define BNX2_CTX_STATUS_USAGE_CNT_ERR			 (1L<<25)
+#define BNX2_CTX_STATUS_INVALID_PAGE			 (1L<<26)
+
+#define BNX2_CTX_VIRT_ADDR				0x00001008
+#define BNX2_CTX_VIRT_ADDR_VIRT_ADDR			 (0x7fffL<<6)
+
+#define BNX2_CTX_PAGE_TBL				0x0000100c
+#define BNX2_CTX_PAGE_TBL_PAGE_TBL			 (0x3fffL<<6)
+
+#define BNX2_CTX_DATA_ADR				0x00001010
+#define BNX2_CTX_DATA_ADR_DATA_ADR			 (0x7ffffL<<2)
+
+#define BNX2_CTX_DATA					0x00001014
+#define BNX2_CTX_LOCK					0x00001018
+#define BNX2_CTX_LOCK_TYPE				 (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_VOID		 (0x0L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_PROTOCOL		 (0x1L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TX			 (0x2L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_TIMER		 (0x4L<<0)
+#define BNX2_CTX_LOCK_TYPE_LOCK_TYPE_COMPLETE		 (0x7L<<0)
+#define BNX2_CTX_LOCK_TYPE_VOID_XI			 (0L<<0)
+#define BNX2_CTX_LOCK_TYPE_PROTOCOL_XI			 (1L<<0)
+#define BNX2_CTX_LOCK_TYPE_TX_XI			 (2L<<0)
+#define BNX2_CTX_LOCK_TYPE_TIMER_XI			 (4L<<0)
+#define BNX2_CTX_LOCK_TYPE_COMPLETE_XI			 (7L<<0)
+#define BNX2_CTX_LOCK_CID_VALUE				 (0x3fffL<<7)
+#define BNX2_CTX_LOCK_GRANTED				 (1L<<26)
+#define BNX2_CTX_LOCK_MODE				 (0x7L<<27)
+#define BNX2_CTX_LOCK_MODE_UNLOCK			 (0x0L<<27)
+#define BNX2_CTX_LOCK_MODE_IMMEDIATE			 (0x1L<<27)
+#define BNX2_CTX_LOCK_MODE_SURE				 (0x2L<<27)
+#define BNX2_CTX_LOCK_STATUS				 (1L<<30)
+#define BNX2_CTX_LOCK_REQ				 (1L<<31)
+
+#define BNX2_CTX_CTX_CTRL				0x0000101c
+#define BNX2_CTX_CTX_CTRL_CTX_ADDR			 (0x7ffffL<<2)
+#define BNX2_CTX_CTX_CTRL_MOD_USAGE_CNT			 (0x3L<<21)
+#define BNX2_CTX_CTX_CTRL_NO_RAM_ACC			 (1L<<23)
+#define BNX2_CTX_CTX_CTRL_PREFETCH_SIZE			 (0x3L<<24)
+#define BNX2_CTX_CTX_CTRL_ATTR				 (1L<<26)
+#define BNX2_CTX_CTX_CTRL_WRITE_REQ			 (1L<<30)
+#define BNX2_CTX_CTX_CTRL_READ_REQ			 (1L<<31)
+
+#define BNX2_CTX_CTX_DATA				0x00001020
+#define BNX2_CTX_ACCESS_STATUS				0x00001040
+#define BNX2_CTX_ACCESS_STATUS_MASTERENCODED		 (0xfL<<0)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYSM		 (0x3L<<10)
+#define BNX2_CTX_ACCESS_STATUS_PAGETABLEINITSM		 (0x3L<<12)
+#define BNX2_CTX_ACCESS_STATUS_ACCESSMEMORYINITSM	 (0x3L<<14)
+#define BNX2_CTX_ACCESS_STATUS_QUALIFIED_REQUEST	 (0x7ffL<<17)
+#define BNX2_CTX_ACCESS_STATUS_CAMMASTERENCODED_XI	 (0x1fL<<0)
+#define BNX2_CTX_ACCESS_STATUS_CACHEMASTERENCODED_XI	 (0x1fL<<5)
+#define BNX2_CTX_ACCESS_STATUS_REQUEST_XI		 (0x3fffffL<<10)
+
+#define BNX2_CTX_DBG_LOCK_STATUS			0x00001044
+#define BNX2_CTX_DBG_LOCK_STATUS_SM			 (0x3ffL<<0)
+#define BNX2_CTX_DBG_LOCK_STATUS_MATCH			 (0x3ffL<<22)
+
+#define BNX2_CTX_CACHE_CTRL_STATUS			0x00001048
+#define BNX2_CTX_CACHE_CTRL_STATUS_RFIFO_OVERFLOW	 (1L<<0)
+#define BNX2_CTX_CACHE_CTRL_STATUS_INVALID_READ_COMP	 (1L<<1)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FLUSH_START		 (1L<<6)
+#define BNX2_CTX_CACHE_CTRL_STATUS_FREE_ENTRY_CNT	 (0x3fL<<7)
+#define BNX2_CTX_CACHE_CTRL_STATUS_CACHE_ENTRY_NEEDED	 (0x3fL<<13)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN0_ACTIVE	 (1L<<19)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN1_ACTIVE	 (1L<<20)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN2_ACTIVE	 (1L<<21)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN3_ACTIVE	 (1L<<22)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN4_ACTIVE	 (1L<<23)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN5_ACTIVE	 (1L<<24)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN6_ACTIVE	 (1L<<25)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN7_ACTIVE	 (1L<<26)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN8_ACTIVE	 (1L<<27)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN9_ACTIVE	 (1L<<28)
+#define BNX2_CTX_CACHE_CTRL_STATUS_RD_CHAN10_ACTIVE	 (1L<<29)
+
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS			0x0000104c
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_DWC		 (0x7L<<0)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_WFIFOC		 (0x7L<<3)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RTAGC		 (0x7L<<6)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_CS_RFIFOC		 (0x7L<<9)
+#define BNX2_CTX_CACHE_CTRL_SM_STATUS_INVALID_BLK_ADDR	 (0x7fffL<<16)
+
+#define BNX2_CTX_CACHE_STATUS				0x00001050
+#define BNX2_CTX_CACHE_STATUS_HELD_ENTRIES		 (0x3ffL<<0)
+#define BNX2_CTX_CACHE_STATUS_MAX_HELD_ENTRIES		 (0x3ffL<<16)
+
+#define BNX2_CTX_DMA_STATUS				0x00001054
+#define BNX2_CTX_DMA_STATUS_RD_CHAN0_STATUS		 (0x3L<<0)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN1_STATUS		 (0x3L<<2)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN2_STATUS		 (0x3L<<4)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN3_STATUS		 (0x3L<<6)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN4_STATUS		 (0x3L<<8)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN5_STATUS		 (0x3L<<10)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN6_STATUS		 (0x3L<<12)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN7_STATUS		 (0x3L<<14)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN8_STATUS		 (0x3L<<16)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN9_STATUS		 (0x3L<<18)
+#define BNX2_CTX_DMA_STATUS_RD_CHAN10_STATUS		 (0x3L<<20)
+
+#define BNX2_CTX_REP_STATUS				0x00001058
+#define BNX2_CTX_REP_STATUS_ERROR_ENTRY			 (0x3ffL<<0)
+#define BNX2_CTX_REP_STATUS_ERROR_CLIENT_ID		 (0x1fL<<10)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MAX_ERR		 (1L<<16)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MIN_ERR		 (1L<<17)
+#define BNX2_CTX_REP_STATUS_USAGE_CNT_MISS_ERR		 (1L<<18)
+
+#define BNX2_CTX_CKSUM_ERROR_STATUS			0x0000105c
+#define BNX2_CTX_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_CTX_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_0			0x00001080
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_CID			 (0x3fffL<<0)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE		 (0x3L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE		 (1L<<16)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_MODE_XI		 (1L<<14)
+#define BNX2_CTX_CHNL_LOCK_STATUS_0_TYPE_XI		 (0x7L<<15)
+
+#define BNX2_CTX_CHNL_LOCK_STATUS_1			0x00001084
+#define BNX2_CTX_CHNL_LOCK_STATUS_2			0x00001088
+#define BNX2_CTX_CHNL_LOCK_STATUS_3			0x0000108c
+#define BNX2_CTX_CHNL_LOCK_STATUS_4			0x00001090
+#define BNX2_CTX_CHNL_LOCK_STATUS_5			0x00001094
+#define BNX2_CTX_CHNL_LOCK_STATUS_6			0x00001098
+#define BNX2_CTX_CHNL_LOCK_STATUS_7			0x0000109c
+#define BNX2_CTX_CHNL_LOCK_STATUS_8			0x000010a0
+#define BNX2_CTX_CHNL_LOCK_STATUS_9			0x000010a4
+
+#define BNX2_CTX_CACHE_DATA				0x000010c4
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL			0x000010c8
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_PAGE_TBL_ADDR	 (0x1ffL<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ		 (1L<<30)
+#define BNX2_CTX_HOST_PAGE_TBL_CTRL_READ_REQ		 (1L<<31)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0			0x000010cc
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID		 (1L<<0)
+#define BNX2_CTX_HOST_PAGE_TBL_DATA0_VALUE		 (0xffffffL<<8)
+
+#define BNX2_CTX_HOST_PAGE_TBL_DATA1			0x000010d0
+#define BNX2_CTX_CAM_CTRL				0x000010d4
+#define BNX2_CTX_CAM_CTRL_CAM_ADDR			 (0x3ffL<<0)
+#define BNX2_CTX_CAM_CTRL_RESET				 (1L<<27)
+#define BNX2_CTX_CAM_CTRL_INVALIDATE			 (1L<<28)
+#define BNX2_CTX_CAM_CTRL_SEARCH			 (1L<<29)
+#define BNX2_CTX_CAM_CTRL_WRITE_REQ			 (1L<<30)
+#define BNX2_CTX_CAM_CTRL_READ_REQ			 (1L<<31)
+
+
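/*
 * Usage sketch for the context_reg group above (illustrative only, not
 * part of the patch itself): a context word can be written by loading the
 * target address into BNX2_CTX_DATA_ADR and then storing the value through
 * BNX2_CTX_DATA.  Same assumed accessors; cid_addr and offset are
 * caller-supplied.
 */
static void example_ctx_write(void __iomem *regview, u32 cid_addr,
			      u32 offset, u32 val)
{
	writel(cid_addr + offset, regview + BNX2_CTX_DATA_ADR);
	writel(val, regview + BNX2_CTX_DATA);
}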
+/*
+ *  emac_reg definition
+ *  offset: 0x1400
+ */
+#define BNX2_EMAC_MODE					0x00001400
+#define BNX2_EMAC_MODE_RESET				 (1L<<0)
+#define BNX2_EMAC_MODE_HALF_DUPLEX			 (1L<<1)
+#define BNX2_EMAC_MODE_PORT				 (0x3L<<2)
+#define BNX2_EMAC_MODE_PORT_NONE			 (0L<<2)
+#define BNX2_EMAC_MODE_PORT_MII				 (1L<<2)
+#define BNX2_EMAC_MODE_PORT_GMII			 (2L<<2)
+#define BNX2_EMAC_MODE_PORT_MII_10M			 (3L<<2)
+#define BNX2_EMAC_MODE_MAC_LOOP				 (1L<<4)
+#define BNX2_EMAC_MODE_25G_MODE				 (1L<<5)
+#define BNX2_EMAC_MODE_TAGGED_MAC_CTL			 (1L<<7)
+#define BNX2_EMAC_MODE_TX_BURST				 (1L<<8)
+#define BNX2_EMAC_MODE_MAX_DEFER_DROP_ENA		 (1L<<9)
+#define BNX2_EMAC_MODE_EXT_LINK_POL			 (1L<<10)
+#define BNX2_EMAC_MODE_FORCE_LINK			 (1L<<11)
+#define BNX2_EMAC_MODE_SERDES_MODE			 (1L<<12)
+#define BNX2_EMAC_MODE_BOND_OVRD			 (1L<<13)
+#define BNX2_EMAC_MODE_MPKT				 (1L<<18)
+#define BNX2_EMAC_MODE_MPKT_RCVD			 (1L<<19)
+#define BNX2_EMAC_MODE_ACPI_RCVD			 (1L<<20)
+
+#define BNX2_EMAC_STATUS				0x00001404
+#define BNX2_EMAC_STATUS_LINK				 (1L<<11)
+#define BNX2_EMAC_STATUS_LINK_CHANGE			 (1L<<12)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_COMPLETE	 (1L<<13)
+#define BNX2_EMAC_STATUS_SERDES_AUTONEG_CHANGE		 (1L<<14)
+#define BNX2_EMAC_STATUS_SERDES_NXT_PG_CHANGE		 (1L<<16)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0		 (1L<<17)
+#define BNX2_EMAC_STATUS_SERDES_RX_CONFIG_IS_0_CHANGE	 (1L<<18)
+#define BNX2_EMAC_STATUS_MI_COMPLETE			 (1L<<22)
+#define BNX2_EMAC_STATUS_MI_INT				 (1L<<23)
+#define BNX2_EMAC_STATUS_AP_ERROR			 (1L<<24)
+#define BNX2_EMAC_STATUS_PARITY_ERROR_STATE		 (1L<<31)
+
+#define BNX2_EMAC_ATTENTION_ENA				0x00001408
+#define BNX2_EMAC_ATTENTION_ENA_LINK			 (1L<<11)
+#define BNX2_EMAC_ATTENTION_ENA_AUTONEG_CHANGE		 (1L<<14)
+#define BNX2_EMAC_ATTENTION_ENA_NXT_PG_CHANGE		 (1L<<16)
+#define BNX2_EMAC_ATTENTION_ENA_SERDES_RX_CONFIG_IS_0_CHANGE	 (1L<<18)
+#define BNX2_EMAC_ATTENTION_ENA_MI_COMPLETE		 (1L<<22)
+#define BNX2_EMAC_ATTENTION_ENA_MI_INT			 (1L<<23)
+#define BNX2_EMAC_ATTENTION_ENA_AP_ERROR		 (1L<<24)
+
+#define BNX2_EMAC_LED					0x0000140c
+#define BNX2_EMAC_LED_OVERRIDE				 (1L<<0)
+#define BNX2_EMAC_LED_1000MB_OVERRIDE			 (1L<<1)
+#define BNX2_EMAC_LED_100MB_OVERRIDE			 (1L<<2)
+#define BNX2_EMAC_LED_10MB_OVERRIDE			 (1L<<3)
+#define BNX2_EMAC_LED_TRAFFIC_OVERRIDE			 (1L<<4)
+#define BNX2_EMAC_LED_BLNK_TRAFFIC			 (1L<<5)
+#define BNX2_EMAC_LED_TRAFFIC				 (1L<<6)
+#define BNX2_EMAC_LED_1000MB				 (1L<<7)
+#define BNX2_EMAC_LED_100MB				 (1L<<8)
+#define BNX2_EMAC_LED_10MB				 (1L<<9)
+#define BNX2_EMAC_LED_TRAFFIC_STAT			 (1L<<10)
+#define BNX2_EMAC_LED_2500MB				 (1L<<11)
+#define BNX2_EMAC_LED_2500MB_OVERRIDE			 (1L<<12)
+#define BNX2_EMAC_LED_ACTIVITY_SEL			 (0x3L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_0			 (0L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_1			 (1L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_2			 (2L<<17)
+#define BNX2_EMAC_LED_ACTIVITY_SEL_3			 (3L<<17)
+#define BNX2_EMAC_LED_BLNK_RATE				 (0xfffL<<19)
+#define BNX2_EMAC_LED_BLNK_RATE_ENA			 (1L<<31)
+
+#define BNX2_EMAC_MAC_MATCH0				0x00001410
+#define BNX2_EMAC_MAC_MATCH1				0x00001414
+#define BNX2_EMAC_MAC_MATCH2				0x00001418
+#define BNX2_EMAC_MAC_MATCH3				0x0000141c
+#define BNX2_EMAC_MAC_MATCH4				0x00001420
+#define BNX2_EMAC_MAC_MATCH5				0x00001424
+#define BNX2_EMAC_MAC_MATCH6				0x00001428
+#define BNX2_EMAC_MAC_MATCH7				0x0000142c
+#define BNX2_EMAC_MAC_MATCH8				0x00001430
+#define BNX2_EMAC_MAC_MATCH9				0x00001434
+#define BNX2_EMAC_MAC_MATCH10				0x00001438
+#define BNX2_EMAC_MAC_MATCH11				0x0000143c
+#define BNX2_EMAC_MAC_MATCH12				0x00001440
+#define BNX2_EMAC_MAC_MATCH13				0x00001444
+#define BNX2_EMAC_MAC_MATCH14				0x00001448
+#define BNX2_EMAC_MAC_MATCH15				0x0000144c
+#define BNX2_EMAC_MAC_MATCH16				0x00001450
+#define BNX2_EMAC_MAC_MATCH17				0x00001454
+#define BNX2_EMAC_MAC_MATCH18				0x00001458
+#define BNX2_EMAC_MAC_MATCH19				0x0000145c
+#define BNX2_EMAC_MAC_MATCH20				0x00001460
+#define BNX2_EMAC_MAC_MATCH21				0x00001464
+#define BNX2_EMAC_MAC_MATCH22				0x00001468
+#define BNX2_EMAC_MAC_MATCH23				0x0000146c
+#define BNX2_EMAC_MAC_MATCH24				0x00001470
+#define BNX2_EMAC_MAC_MATCH25				0x00001474
+#define BNX2_EMAC_MAC_MATCH26				0x00001478
+#define BNX2_EMAC_MAC_MATCH27				0x0000147c
+#define BNX2_EMAC_MAC_MATCH28				0x00001480
+#define BNX2_EMAC_MAC_MATCH29				0x00001484
+#define BNX2_EMAC_MAC_MATCH30				0x00001488
+#define BNX2_EMAC_MAC_MATCH31				0x0000148c
+#define BNX2_EMAC_BACKOFF_SEED				0x00001498
+#define BNX2_EMAC_BACKOFF_SEED_EMAC_BACKOFF_SEED	 (0x3ffL<<0)
+
+#define BNX2_EMAC_RX_MTU_SIZE				0x0000149c
+#define BNX2_EMAC_RX_MTU_SIZE_MTU_SIZE			 (0xffffL<<0)
+#define BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA			 (1L<<31)
+
+#define BNX2_EMAC_SERDES_CNTL				0x000014a4
+#define BNX2_EMAC_SERDES_CNTL_RXR			 (0x7L<<0)
+#define BNX2_EMAC_SERDES_CNTL_RXG			 (0x3L<<3)
+#define BNX2_EMAC_SERDES_CNTL_RXCKSEL			 (1L<<6)
+#define BNX2_EMAC_SERDES_CNTL_TXBIAS			 (0x7L<<7)
+#define BNX2_EMAC_SERDES_CNTL_BGMAX			 (1L<<10)
+#define BNX2_EMAC_SERDES_CNTL_BGMIN			 (1L<<11)
+#define BNX2_EMAC_SERDES_CNTL_TXMODE			 (1L<<12)
+#define BNX2_EMAC_SERDES_CNTL_TXEDGE			 (1L<<13)
+#define BNX2_EMAC_SERDES_CNTL_SERDES_MODE		 (1L<<14)
+#define BNX2_EMAC_SERDES_CNTL_PLLTEST			 (1L<<15)
+#define BNX2_EMAC_SERDES_CNTL_CDET_EN			 (1L<<16)
+#define BNX2_EMAC_SERDES_CNTL_TBI_LBK			 (1L<<17)
+#define BNX2_EMAC_SERDES_CNTL_REMOTE_LBK		 (1L<<18)
+#define BNX2_EMAC_SERDES_CNTL_REV_PHASE			 (1L<<19)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL12			 (0x3L<<20)
+#define BNX2_EMAC_SERDES_CNTL_REGCTL25			 (0x3L<<22)
+
+#define BNX2_EMAC_SERDES_STATUS				0x000014a8
+#define BNX2_EMAC_SERDES_STATUS_RX_STAT			 (0xffL<<0)
+#define BNX2_EMAC_SERDES_STATUS_COMMA_DET		 (1L<<8)
+
+#define BNX2_EMAC_MDIO_COMM				0x000014ac
+#define BNX2_EMAC_MDIO_COMM_DATA			 (0xffffL<<0)
+#define BNX2_EMAC_MDIO_COMM_REG_ADDR			 (0x1fL<<16)
+#define BNX2_EMAC_MDIO_COMM_PHY_ADDR			 (0x1fL<<21)
+#define BNX2_EMAC_MDIO_COMM_COMMAND			 (0x3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_0		 (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_ADDRESS		 (0L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ		 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_22_XI		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_WRITE_45_XI		 (1L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_22_XI		 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_INC_45_XI	 (2L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_UNDEFINED_3		 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_COMMAND_READ_45		 (3L<<26)
+#define BNX2_EMAC_MDIO_COMM_FAIL			 (1L<<28)
+#define BNX2_EMAC_MDIO_COMM_START_BUSY			 (1L<<29)
+#define BNX2_EMAC_MDIO_COMM_DISEXT			 (1L<<30)
+
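/*
 * Usage sketch for BNX2_EMAC_MDIO_COMM above (illustrative only, not part
 * of the patch itself): a clause-22 PHY read posts the command with
 * START_BUSY set, polls until the MAC clears it, and extracts the data
 * field.  Same assumed accessors; the retry bound is illustrative, and a
 * production caller would also check BNX2_EMAC_MDIO_COMM_FAIL.
 */
static int example_mdio_read(void __iomem *regview, u32 phy_addr,
			     u32 reg_addr, u16 *data)
{
	u32 val;
	int i;

	writel((phy_addr << 21) | (reg_addr << 16) |
	       BNX2_EMAC_MDIO_COMM_COMMAND_READ |
	       BNX2_EMAC_MDIO_COMM_START_BUSY,
	       regview + BNX2_EMAC_MDIO_COMM);

	for (i = 0; i < 50; i++) {
		udelay(10);
		val = readl(regview + BNX2_EMAC_MDIO_COMM);
		if (!(val & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			*data = val & BNX2_EMAC_MDIO_COMM_DATA;
			return 0;
		}
	}
	return -EBUSY;
}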
+#define BNX2_EMAC_MDIO_STATUS				0x000014b0
+#define BNX2_EMAC_MDIO_STATUS_LINK			 (1L<<0)
+#define BNX2_EMAC_MDIO_STATUS_10MB			 (1L<<1)
+
+#define BNX2_EMAC_MDIO_MODE				0x000014b4
+#define BNX2_EMAC_MDIO_MODE_SHORT_PREAMBLE		 (1L<<1)
+#define BNX2_EMAC_MDIO_MODE_AUTO_POLL			 (1L<<4)
+#define BNX2_EMAC_MDIO_MODE_BIT_BANG			 (1L<<8)
+#define BNX2_EMAC_MDIO_MODE_MDIO			 (1L<<9)
+#define BNX2_EMAC_MDIO_MODE_MDIO_OE			 (1L<<10)
+#define BNX2_EMAC_MDIO_MODE_MDC				 (1L<<11)
+#define BNX2_EMAC_MDIO_MODE_MDINT			 (1L<<12)
+#define BNX2_EMAC_MDIO_MODE_EXT_MDINT			 (1L<<13)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT			 (0x1fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLOCK_CNT_XI		 (0x3fL<<16)
+#define BNX2_EMAC_MDIO_MODE_CLAUSE_45_XI		 (1L<<31)
+
+#define BNX2_EMAC_MDIO_AUTO_STATUS			0x000014b8
+#define BNX2_EMAC_MDIO_AUTO_STATUS_AUTO_ERR		 (1L<<0)
+
+#define BNX2_EMAC_TX_MODE				0x000014bc
+#define BNX2_EMAC_TX_MODE_RESET				 (1L<<0)
+#define BNX2_EMAC_TX_MODE_CS16_TEST			 (1L<<2)
+#define BNX2_EMAC_TX_MODE_EXT_PAUSE_EN			 (1L<<3)
+#define BNX2_EMAC_TX_MODE_FLOW_EN			 (1L<<4)
+#define BNX2_EMAC_TX_MODE_BIG_BACKOFF			 (1L<<5)
+#define BNX2_EMAC_TX_MODE_LONG_PAUSE			 (1L<<6)
+#define BNX2_EMAC_TX_MODE_LINK_AWARE			 (1L<<7)
+
+#define BNX2_EMAC_TX_STATUS				0x000014c0
+#define BNX2_EMAC_TX_STATUS_XOFFED			 (1L<<0)
+#define BNX2_EMAC_TX_STATUS_XOFF_SENT			 (1L<<1)
+#define BNX2_EMAC_TX_STATUS_XON_SENT			 (1L<<2)
+#define BNX2_EMAC_TX_STATUS_LINK_UP			 (1L<<3)
+#define BNX2_EMAC_TX_STATUS_UNDERRUN			 (1L<<4)
+#define BNX2_EMAC_TX_STATUS_CS16_ERROR			 (1L<<5)
+
+#define BNX2_EMAC_TX_LENGTHS				0x000014c4
+#define BNX2_EMAC_TX_LENGTHS_SLOT			 (0xffL<<0)
+#define BNX2_EMAC_TX_LENGTHS_IPG			 (0xfL<<8)
+#define BNX2_EMAC_TX_LENGTHS_IPG_CRS			 (0x3L<<12)
+
+#define BNX2_EMAC_RX_MODE				0x000014c8
+#define BNX2_EMAC_RX_MODE_RESET				 (1L<<0)
+#define BNX2_EMAC_RX_MODE_FLOW_EN			 (1L<<2)
+#define BNX2_EMAC_RX_MODE_KEEP_MAC_CONTROL		 (1L<<3)
+#define BNX2_EMAC_RX_MODE_KEEP_PAUSE			 (1L<<4)
+#define BNX2_EMAC_RX_MODE_ACCEPT_OVERSIZE		 (1L<<5)
+#define BNX2_EMAC_RX_MODE_ACCEPT_RUNTS			 (1L<<6)
+#define BNX2_EMAC_RX_MODE_LLC_CHK			 (1L<<7)
+#define BNX2_EMAC_RX_MODE_PROMISCUOUS			 (1L<<8)
+#define BNX2_EMAC_RX_MODE_NO_CRC_CHK			 (1L<<9)
+#define BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG			 (1L<<10)
+#define BNX2_EMAC_RX_MODE_FILT_BROADCAST		 (1L<<11)
+#define BNX2_EMAC_RX_MODE_SORT_MODE			 (1L<<12)
+
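/*
 * Usage sketch for BNX2_EMAC_RX_MODE above (illustrative only, not part of
 * the patch itself): single-bit flags such as PROMISCUOUS are toggled with
 * a plain read-modify-write.  Same assumed accessors.
 */
static void example_set_promiscuous(void __iomem *regview, bool enable)
{
	u32 mode = readl(regview + BNX2_EMAC_RX_MODE);

	if (enable)
		mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
	else
		mode &= ~BNX2_EMAC_RX_MODE_PROMISCUOUS;
	writel(mode, regview + BNX2_EMAC_RX_MODE);
}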
+#define BNX2_EMAC_RX_STATUS				0x000014cc
+#define BNX2_EMAC_RX_STATUS_FFED			 (1L<<0)
+#define BNX2_EMAC_RX_STATUS_FF_RECEIVED			 (1L<<1)
+#define BNX2_EMAC_RX_STATUS_N_RECEIVED			 (1L<<2)
+
+#define BNX2_EMAC_MULTICAST_HASH0			0x000014d0
+#define BNX2_EMAC_MULTICAST_HASH1			0x000014d4
+#define BNX2_EMAC_MULTICAST_HASH2			0x000014d8
+#define BNX2_EMAC_MULTICAST_HASH3			0x000014dc
+#define BNX2_EMAC_MULTICAST_HASH4			0x000014e0
+#define BNX2_EMAC_MULTICAST_HASH5			0x000014e4
+#define BNX2_EMAC_MULTICAST_HASH6			0x000014e8
+#define BNX2_EMAC_MULTICAST_HASH7			0x000014ec
+#define BNX2_EMAC_CKSUM_ERROR_STATUS			0x000014f0
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_EMAC_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_EMAC_RX_STAT_IFHCINOCTETS			0x00001500
+#define BNX2_EMAC_RX_STAT_IFHCINBADOCTETS		0x00001504
+#define BNX2_EMAC_RX_STAT_ETHERSTATSFRAGMENTS		0x00001508
+#define BNX2_EMAC_RX_STAT_IFHCINUCASTPKTS		0x0000150c
+#define BNX2_EMAC_RX_STAT_IFHCINMULTICASTPKTS		0x00001510
+#define BNX2_EMAC_RX_STAT_IFHCINBROADCASTPKTS		0x00001514
+#define BNX2_EMAC_RX_STAT_DOT3STATSFCSERRORS		0x00001518
+#define BNX2_EMAC_RX_STAT_DOT3STATSALIGNMENTERRORS	0x0000151c
+#define BNX2_EMAC_RX_STAT_DOT3STATSCARRIERSENSEERRORS	0x00001520
+#define BNX2_EMAC_RX_STAT_XONPAUSEFRAMESRECEIVED	0x00001524
+#define BNX2_EMAC_RX_STAT_XOFFPAUSEFRAMESRECEIVED	0x00001528
+#define BNX2_EMAC_RX_STAT_MACCONTROLFRAMESRECEIVED	0x0000152c
+#define BNX2_EMAC_RX_STAT_XOFFSTATEENTERED		0x00001530
+#define BNX2_EMAC_RX_STAT_DOT3STATSFRAMESTOOLONG	0x00001534
+#define BNX2_EMAC_RX_STAT_ETHERSTATSJABBERS		0x00001538
+#define BNX2_EMAC_RX_STAT_ETHERSTATSUNDERSIZEPKTS	0x0000153c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS64OCTETS	0x00001540
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS	0x00001544
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS	0x00001548
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS	0x0000154c
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS	0x00001550
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS	0x00001554
+#define BNX2_EMAC_RX_STAT_ETHERSTATSPKTSOVER1522OCTETS	0x00001558
+#define BNX2_EMAC_RXMAC_DEBUG0				0x0000155c
+#define BNX2_EMAC_RXMAC_DEBUG1				0x00001560
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_NE_BYTE_COUNT	 (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG1_LENGTH_OUT_RANGE		 (1L<<1)
+#define BNX2_EMAC_RXMAC_DEBUG1_BAD_CRC			 (1L<<2)
+#define BNX2_EMAC_RXMAC_DEBUG1_RX_ERROR			 (1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG1_ALIGN_ERROR		 (1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG1_LAST_DATA		 (1L<<5)
+#define BNX2_EMAC_RXMAC_DEBUG1_ODD_BYTE_START		 (1L<<6)
+#define BNX2_EMAC_RXMAC_DEBUG1_BYTE_COUNT		 (0xffffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG1_SLOT_TIME		 (0xffL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG2				0x00001564
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE			 (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_IDLE		 (0x0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SFD		 (0x1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DATA		 (0x2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SKEEP		 (0x3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_EXT		 (0x4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_DROP		 (0x5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_SDROP		 (0x6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_SM_STATE_FC		 (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE		 (0xfL<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_IDLE		 (0x0L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA0		 (0x1L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA1		 (0x2L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA2		 (0x3L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_DATA3		 (0x4L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_ABORT		 (0x5L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_WAIT		 (0x6L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_STATUS		 (0x7L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_IDI_STATE_LAST		 (0x8L<<3)
+#define BNX2_EMAC_RXMAC_DEBUG2_BYTE_IN			 (0xffL<<7)
+#define BNX2_EMAC_RXMAC_DEBUG2_FALSEC			 (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG2_TAGGED			 (1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE		 (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_IDLE		 (0L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_PAUSE_STATE_PAUSED	 (1L<<18)
+#define BNX2_EMAC_RXMAC_DEBUG2_SE_COUNTER		 (0xfL<<19)
+#define BNX2_EMAC_RXMAC_DEBUG2_QUANTA			 (0x1fL<<23)
+
+#define BNX2_EMAC_RXMAC_DEBUG3				0x00001568
+#define BNX2_EMAC_RXMAC_DEBUG3_PAUSE_CTR		 (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG3_TMP_PAUSE_CTR		 (0xffffL<<16)
+
+#define BNX2_EMAC_RXMAC_DEBUG4				0x0000156c
+#define BNX2_EMAC_RXMAC_DEBUG4_TYPE_FIELD		 (0xffffL<<0)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE		 (0x3fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_IDLE		 (0x0L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC2		 (0x1L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UMAC3		 (0x2L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UNI		 (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC3		 (0x5L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA1		 (0x6L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MMAC2		 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA2		 (0x7L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PSA3		 (0x8L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC2		 (0x9L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC3		 (0xaL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT1	 (0xeL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MWAIT2	 (0xfL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MCHECK	 (0x10L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MC		 (0x11L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC2		 (0x12L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC3		 (0x13L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA1		 (0x14L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA2		 (0x15L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BSA3		 (0x16L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BTYPE		 (0x17L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_BC		 (0x18L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PTYPE		 (0x19L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_CMD		 (0x1aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MAC		 (0x1bL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_LATCH		 (0x1cL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XOFF		 (0x1dL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_XON		 (0x1eL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_PAUSED	 (0x1fL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_NPAUSED	 (0x20L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TTYPE		 (0x21L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_TVAL		 (0x22L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA1		 (0x23L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA2		 (0x24L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_USA3		 (0x25L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTYPE		 (0x26L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTTYPE	 (0x27L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_UTVAL		 (0x28L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_MTYPE		 (0x29L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_FILT_STATE_DROP		 (0x2aL<<16)
+#define BNX2_EMAC_RXMAC_DEBUG4_DROP_PKT			 (1L<<22)
+#define BNX2_EMAC_RXMAC_DEBUG4_SLOT_FILLED		 (1L<<23)
+#define BNX2_EMAC_RXMAC_DEBUG4_FALSE_CARRIER		 (1L<<24)
+#define BNX2_EMAC_RXMAC_DEBUG4_LAST_DATA		 (1L<<25)
+#define BNX2_EMAC_RXMAC_DEBUG4_SFD_FOUND		 (1L<<26)
+#define BNX2_EMAC_RXMAC_DEBUG4_ADVANCE			 (1L<<27)
+#define BNX2_EMAC_RXMAC_DEBUG4_START			 (1L<<28)
+
+#define BNX2_EMAC_RXMAC_DEBUG5				0x00001570
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM			 (0x7L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_IDLE		 (0L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_EOF	 (1L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_WAIT_STAT	 (2L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4FCRC	 (3L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4RDE	 (4L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_SET_EOF4ALL	 (5L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_PS_IDISM_1WD_WAIT_STAT	 (6L<<0)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1		 (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_VDW		 (0x0L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_STAT		 (0x1L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_AEOF		 (0x2L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_NEOF		 (0x3L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SOF		 (0x4L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SAEOF		 (0x6L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF1_SNEOF		 (0x7L<<4)
+#define BNX2_EMAC_RXMAC_DEBUG5_EOF_DETECTED		 (1L<<7)
+#define BNX2_EMAC_RXMAC_DEBUG5_CCODE_BUF0		 (0x7L<<8)
+#define BNX2_EMAC_RXMAC_DEBUG5_RPM_IDI_FIFO_FULL	 (1L<<11)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_CCODE		 (1L<<12)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_DATA		 (1L<<13)
+#define BNX2_EMAC_RXMAC_DEBUG5_LOAD_STAT		 (1L<<14)
+#define BNX2_EMAC_RXMAC_DEBUG5_CLR_STAT			 (1L<<15)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_CCODE		 (0x3L<<16)
+#define BNX2_EMAC_RXMAC_DEBUG5_IDI_RPM_ACCEPT		 (1L<<19)
+#define BNX2_EMAC_RXMAC_DEBUG5_FMLEN			 (0xfffL<<20)
+
+#define BNX2_EMAC_RX_STAT_FALSECARRIERERRORS		0x00001574
+#define BNX2_EMAC_RX_STAT_AC0				0x00001580
+#define BNX2_EMAC_RX_STAT_AC1				0x00001584
+#define BNX2_EMAC_RX_STAT_AC2				0x00001588
+#define BNX2_EMAC_RX_STAT_AC3				0x0000158c
+#define BNX2_EMAC_RX_STAT_AC4				0x00001590
+#define BNX2_EMAC_RX_STAT_AC5				0x00001594
+#define BNX2_EMAC_RX_STAT_AC6				0x00001598
+#define BNX2_EMAC_RX_STAT_AC7				0x0000159c
+#define BNX2_EMAC_RX_STAT_AC8				0x000015a0
+#define BNX2_EMAC_RX_STAT_AC9				0x000015a4
+#define BNX2_EMAC_RX_STAT_AC10				0x000015a8
+#define BNX2_EMAC_RX_STAT_AC11				0x000015ac
+#define BNX2_EMAC_RX_STAT_AC12				0x000015b0
+#define BNX2_EMAC_RX_STAT_AC13				0x000015b4
+#define BNX2_EMAC_RX_STAT_AC14				0x000015b8
+#define BNX2_EMAC_RX_STAT_AC15				0x000015bc
+#define BNX2_EMAC_RX_STAT_AC16				0x000015c0
+#define BNX2_EMAC_RX_STAT_AC17				0x000015c4
+#define BNX2_EMAC_RX_STAT_AC18				0x000015c8
+#define BNX2_EMAC_RX_STAT_AC19				0x000015cc
+#define BNX2_EMAC_RX_STAT_AC20				0x000015d0
+#define BNX2_EMAC_RX_STAT_AC21				0x000015d4
+#define BNX2_EMAC_RX_STAT_AC22				0x000015d8
+#define BNX2_EMAC_RXMAC_SUC_DBG_OVERRUNVEC		0x000015dc
+#define BNX2_EMAC_RX_STAT_AC_28				0x000015f4
+#define BNX2_EMAC_TX_STAT_IFHCOUTOCTETS			0x00001600
+#define BNX2_EMAC_TX_STAT_IFHCOUTBADOCTETS		0x00001604
+#define BNX2_EMAC_TX_STAT_ETHERSTATSCOLLISIONS		0x00001608
+#define BNX2_EMAC_TX_STAT_OUTXONSENT			0x0000160c
+#define BNX2_EMAC_TX_STAT_OUTXOFFSENT			0x00001610
+#define BNX2_EMAC_TX_STAT_FLOWCONTROLDONE		0x00001614
+#define BNX2_EMAC_TX_STAT_DOT3STATSSINGLECOLLISIONFRAMES	0x00001618
+#define BNX2_EMAC_TX_STAT_DOT3STATSMULTIPLECOLLISIONFRAMES	0x0000161c
+#define BNX2_EMAC_TX_STAT_DOT3STATSDEFERREDTRANSMISSIONS	0x00001620
+#define BNX2_EMAC_TX_STAT_DOT3STATSEXCESSIVECOLLISIONS	0x00001624
+#define BNX2_EMAC_TX_STAT_DOT3STATSLATECOLLISIONS	0x00001628
+#define BNX2_EMAC_TX_STAT_IFHCOUTUCASTPKTS		0x0000162c
+#define BNX2_EMAC_TX_STAT_IFHCOUTMULTICASTPKTS		0x00001630
+#define BNX2_EMAC_TX_STAT_IFHCOUTBROADCASTPKTS		0x00001634
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS64OCTETS	0x00001638
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS65OCTETSTO127OCTETS	0x0000163c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS128OCTETSTO255OCTETS	0x00001640
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS256OCTETSTO511OCTETS	0x00001644
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS512OCTETSTO1023OCTETS	0x00001648
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTS1024OCTETSTO1522OCTETS	0x0000164c
+#define BNX2_EMAC_TX_STAT_ETHERSTATSPKTSOVER1522OCTETS	0x00001650
+#define BNX2_EMAC_TX_STAT_DOT3STATSINTERNALMACTRANSMITERRORS	0x00001654
+#define BNX2_EMAC_TXMAC_DEBUG0				0x00001658
+#define BNX2_EMAC_TXMAC_DEBUG1				0x0000165c
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE		 (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_IDLE		 (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_START0		 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA0		 (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA1		 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA2		 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_DATA3		 (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT0		 (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_ODI_STATE_WAIT1		 (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG1_CRS_ENABLE		 (1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG1_BAD_CRC			 (1L<<5)
+#define BNX2_EMAC_TXMAC_DEBUG1_SE_COUNTER		 (0xfL<<6)
+#define BNX2_EMAC_TXMAC_DEBUG1_SEND_PAUSE		 (1L<<10)
+#define BNX2_EMAC_TXMAC_DEBUG1_LATE_COLLISION		 (1L<<11)
+#define BNX2_EMAC_TXMAC_DEBUG1_MAX_DEFER		 (1L<<12)
+#define BNX2_EMAC_TXMAC_DEBUG1_DEFERRED			 (1L<<13)
+#define BNX2_EMAC_TXMAC_DEBUG1_ONE_BYTE			 (1L<<14)
+#define BNX2_EMAC_TXMAC_DEBUG1_IPG_TIME			 (0xfL<<15)
+#define BNX2_EMAC_TXMAC_DEBUG1_SLOT_TIME		 (0xffL<<19)
+
+#define BNX2_EMAC_TXMAC_DEBUG2				0x00001660
+#define BNX2_EMAC_TXMAC_DEBUG2_BACK_OFF			 (0x3ffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG2_BYTE_COUNT		 (0xffffL<<10)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_COUNT		 (0x1fL<<26)
+#define BNX2_EMAC_TXMAC_DEBUG2_COL_BIT			 (1L<<31)
+
+#define BNX2_EMAC_TXMAC_DEBUG3				0x00001664
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE			 (0xfL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_IDLE		 (0x0L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE1		 (0x1L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_PRE2		 (0x2L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SFD		 (0x3L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_DATA		 (0x4L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC1		 (0x5L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_CRC2		 (0x6L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EXT		 (0x7L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATB		 (0x8L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_STATG		 (0x9L<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_JAM		 (0xaL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_EJAM		 (0xbL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BJAM		 (0xcL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_SWAIT		 (0xdL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_SM_STATE_BACKOFF		 (0xeL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE		 (0x7L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_IDLE		 (0x0L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_WAIT		 (0x1L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_UNI		 (0x2L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_MC		 (0x3L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC2		 (0x4L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC3		 (0x5L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_FILT_STATE_BC		 (0x6L<<4)
+#define BNX2_EMAC_TXMAC_DEBUG3_CRS_DONE			 (1L<<7)
+#define BNX2_EMAC_TXMAC_DEBUG3_XOFF			 (1L<<8)
+#define BNX2_EMAC_TXMAC_DEBUG3_SE_COUNTER		 (0xfL<<9)
+#define BNX2_EMAC_TXMAC_DEBUG3_QUANTA_COUNTER		 (0x1fL<<13)
+
+#define BNX2_EMAC_TXMAC_DEBUG4				0x00001668
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_COUNTER		 (0xffffL<<0)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE		 (0xfL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_IDLE		 (0x0L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA1		 (0x2L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA2		 (0x3L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC3		 (0x4L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC2		 (0x5L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_MCA3		 (0x6L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_SRC1		 (0x7L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC1		 (0x8L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CRC2		 (0x9L<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TIME		 (0xaL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_TYPE		 (0xcL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_WAIT		 (0xdL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_PAUSE_STATE_CMD		 (0xeL<<16)
+#define BNX2_EMAC_TXMAC_DEBUG4_STATS0_VALID		 (1L<<20)
+#define BNX2_EMAC_TXMAC_DEBUG4_APPEND_CRC		 (1L<<21)
+#define BNX2_EMAC_TXMAC_DEBUG4_SLOT_FILLED		 (1L<<22)
+#define BNX2_EMAC_TXMAC_DEBUG4_MAX_DEFER		 (1L<<23)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_EXTEND		 (1L<<24)
+#define BNX2_EMAC_TXMAC_DEBUG4_SEND_PADDING		 (1L<<25)
+#define BNX2_EMAC_TXMAC_DEBUG4_EOF_LOC			 (1L<<26)
+#define BNX2_EMAC_TXMAC_DEBUG4_COLLIDING		 (1L<<27)
+#define BNX2_EMAC_TXMAC_DEBUG4_COL_IN			 (1L<<28)
+#define BNX2_EMAC_TXMAC_DEBUG4_BURSTING			 (1L<<29)
+#define BNX2_EMAC_TXMAC_DEBUG4_ADVANCE			 (1L<<30)
+#define BNX2_EMAC_TXMAC_DEBUG4_GO			 (1L<<31)
+
+#define BNX2_EMAC_TX_STAT_AC0				0x00001680
+#define BNX2_EMAC_TX_STAT_AC1				0x00001684
+#define BNX2_EMAC_TX_STAT_AC2				0x00001688
+#define BNX2_EMAC_TX_STAT_AC3				0x0000168c
+#define BNX2_EMAC_TX_STAT_AC4				0x00001690
+#define BNX2_EMAC_TX_STAT_AC5				0x00001694
+#define BNX2_EMAC_TX_STAT_AC6				0x00001698
+#define BNX2_EMAC_TX_STAT_AC7				0x0000169c
+#define BNX2_EMAC_TX_STAT_AC8				0x000016a0
+#define BNX2_EMAC_TX_STAT_AC9				0x000016a4
+#define BNX2_EMAC_TX_STAT_AC10				0x000016a8
+#define BNX2_EMAC_TX_STAT_AC11				0x000016ac
+#define BNX2_EMAC_TX_STAT_AC12				0x000016b0
+#define BNX2_EMAC_TX_STAT_AC13				0x000016b4
+#define BNX2_EMAC_TX_STAT_AC14				0x000016b8
+#define BNX2_EMAC_TX_STAT_AC15				0x000016bc
+#define BNX2_EMAC_TX_STAT_AC16				0x000016c0
+#define BNX2_EMAC_TX_STAT_AC17				0x000016c4
+#define BNX2_EMAC_TX_STAT_AC18				0x000016c8
+#define BNX2_EMAC_TX_STAT_AC19				0x000016cc
+#define BNX2_EMAC_TX_STAT_AC20				0x000016d0
+#define BNX2_EMAC_TXMAC_SUC_DBG_OVERRUNVEC		0x000016d8
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL			0x000016fc
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_INC	 (0x7fL<<0)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_TX_THROTTLE_NUM	 (0x7fL<<16)
+#define BNX2_EMAC_TX_RATE_LIMIT_CTRL_RATE_LIMITER_EN	 (1L<<31)
+
+
+/*
+ *  rpm_reg definition
+ *  offset: 0x1800
+ */
+#define BNX2_RPM_COMMAND				0x00001800
+#define BNX2_RPM_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_RPM_COMMAND_OVERRUN_ABORT			 (1L<<4)
+
+#define BNX2_RPM_STATUS					0x00001804
+#define BNX2_RPM_STATUS_MBUF_WAIT			 (1L<<0)
+#define BNX2_RPM_STATUS_FREE_WAIT			 (1L<<1)
+
+#define BNX2_RPM_CONFIG					0x00001808
+#define BNX2_RPM_CONFIG_NO_PSD_HDR_CKSUM		 (1L<<0)
+#define BNX2_RPM_CONFIG_ACPI_ENA			 (1L<<1)
+#define BNX2_RPM_CONFIG_ACPI_KEEP			 (1L<<2)
+#define BNX2_RPM_CONFIG_MP_KEEP				 (1L<<3)
+#define BNX2_RPM_CONFIG_SORT_VECT_VAL			 (0xfL<<4)
+#define BNX2_RPM_CONFIG_DISABLE_WOL_ASSERT		 (1L<<30)
+#define BNX2_RPM_CONFIG_IGNORE_VLAN			 (1L<<31)
+
+#define BNX2_RPM_MGMT_PKT_CTRL				0x0000180c
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_SORT		 (0xfL<<0)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_RULE		 (0xfL<<4)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_DISCARD_EN		 (1L<<30)
+#define BNX2_RPM_MGMT_PKT_CTRL_MGMT_EN			 (1L<<31)
+
+#define BNX2_RPM_VLAN_MATCH0				0x00001810
+#define BNX2_RPM_VLAN_MATCH0_RPM_VLAN_MTCH0_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH1				0x00001814
+#define BNX2_RPM_VLAN_MATCH1_RPM_VLAN_MTCH1_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH2				0x00001818
+#define BNX2_RPM_VLAN_MATCH2_RPM_VLAN_MTCH2_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_VLAN_MATCH3				0x0000181c
+#define BNX2_RPM_VLAN_MATCH3_RPM_VLAN_MTCH3_VALUE	 (0xfffL<<0)
+
+#define BNX2_RPM_SORT_USER0				0x00001820
+#define BNX2_RPM_SORT_USER0_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER0_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER0_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER0_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER0_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER0_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER0_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER0_VLAN_NOTMATCH		 (1L<<25)
+#define BNX2_RPM_SORT_USER0_ENA				 (1L<<31)
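+/*
+ * A sketch of the presumed programming sequence for the USER0 sort rule
+ * (REG_WR(bp, offset, val) stands in for the driver's register write
+ * accessor; sort_mode and promiscuous are illustrative):
+ *
+ *	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
+ *	if (promiscuous)
+ *		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
+ *	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
+ *	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
+ *	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
+ *
+ * The PM_EN bitmask selects which perfect-match MAC address slots take
+ * part in the sort (bit 0 above is slot 0); the initial write of 0
+ * quiesces the rule while it is being updated.
+ */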
+
+#define BNX2_RPM_SORT_USER1				0x00001824
+#define BNX2_RPM_SORT_USER1_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER1_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER1_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER1_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER1_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER1_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER1_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER1_ENA				 (1L<<31)
+
+#define BNX2_RPM_SORT_USER2				0x00001828
+#define BNX2_RPM_SORT_USER2_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER2_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER2_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER2_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER2_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER2_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER2_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER2_ENA				 (1L<<31)
+
+#define BNX2_RPM_SORT_USER3				0x0000182c
+#define BNX2_RPM_SORT_USER3_PM_EN			 (0xffffL<<0)
+#define BNX2_RPM_SORT_USER3_BC_EN			 (1L<<16)
+#define BNX2_RPM_SORT_USER3_MC_EN			 (1L<<17)
+#define BNX2_RPM_SORT_USER3_MC_HSH_EN			 (1L<<18)
+#define BNX2_RPM_SORT_USER3_PROM_EN			 (1L<<19)
+#define BNX2_RPM_SORT_USER3_VLAN_EN			 (0xfL<<20)
+#define BNX2_RPM_SORT_USER3_PROM_VLAN			 (1L<<24)
+#define BNX2_RPM_SORT_USER3_ENA				 (1L<<31)
+
+#define BNX2_RPM_STAT_L2_FILTER_DISCARDS		0x00001840
+#define BNX2_RPM_STAT_RULE_CHECKER_DISCARDS		0x00001844
+#define BNX2_RPM_STAT_IFINFTQDISCARDS			0x00001848
+#define BNX2_RPM_STAT_IFINMBUFDISCARD			0x0000184c
+#define BNX2_RPM_STAT_RULE_CHECKER_P4_HIT		0x00001850
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0		0x00001854
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION0_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1		0x00001858
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION1_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2		0x0000185c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION2_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3		0x00001860
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION3_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4		0x00001864
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION4_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5		0x00001868
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION5_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6		0x0000186c
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION6_NEXT_HEADER_EN	 (1L<<31)
+
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7		0x00001870
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN	 (0xffL<<0)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER	 (0xffL<<16)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_LEN_TYPE	 (1L<<30)
+#define BNX2_RPM_IPV6_PROGRAMMABLE_EXTENSION7_NEXT_HEADER_EN	 (1L<<31)
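+/*
+ * PROGRAMMABLE_EXTENSION0..7 let the receive parser skip additional
+ * IPv6 extension headers.  Presumed usage (an interpretation of the
+ * fields, not taken from the driver): write the extension header's
+ * protocol number to NEXT_HEADER, its length (or, depending on
+ * NEXT_HEADER_LEN_TYPE, the location of its length field) to
+ * NEXT_HEADER_LEN, and set NEXT_HEADER_EN to activate the entry.
+ */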
+
+#define BNX2_RPM_STAT_AC0				0x00001880
+#define BNX2_RPM_STAT_AC1				0x00001884
+#define BNX2_RPM_STAT_AC2				0x00001888
+#define BNX2_RPM_STAT_AC3				0x0000188c
+#define BNX2_RPM_STAT_AC4				0x00001890
+#define BNX2_RPM_RC_CNTL_16				0x000018e0
+#define BNX2_RPM_RC_CNTL_16_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_16_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_16_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_16_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_16_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_16_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_16_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_16_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_16_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_16_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_16_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_16_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_16_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_16_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_16_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_16_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_16			0x000018e4
+#define BNX2_RPM_RC_VALUE_MASK_16_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_16_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_17				0x000018e8
+#define BNX2_RPM_RC_CNTL_17_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_17_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_17_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_17_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_17_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_17_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_17_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_17_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_17_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_17_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_17_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_17_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_17_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_17_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_17_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_17_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_17			0x000018ec
+#define BNX2_RPM_RC_VALUE_MASK_17_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_17_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_18				0x000018f0
+#define BNX2_RPM_RC_CNTL_18_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_18_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_18_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_18_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_18_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_18_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_18_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_18_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_18_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_18_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_18_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_18_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_18_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_18_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_18_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_18_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_18			0x000018f4
+#define BNX2_RPM_RC_VALUE_MASK_18_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_18_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_19				0x000018f8
+#define BNX2_RPM_RC_CNTL_19_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_19_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_19_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_19_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_UDP		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_19_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_19_COMP			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_GREATER		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_19_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_19_MAP				 (1L<<18)
+#define BNX2_RPM_RC_CNTL_19_SBIT			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_19_CMDSEL			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_19_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_19_MASK			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_19_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_19_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_19_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_19_NBIT			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_19			0x000018fc
+#define BNX2_RPM_RC_VALUE_MASK_19_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_19_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_0				0x00001900
+#define BNX2_RPM_RC_CNTL_0_OFFSET			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_0_CLASS			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_0_PRIORITY			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_0_P4				 (1L<<12)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_START		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_IP			 (1L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP			 (2L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_UDP			 (3L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_DATA		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_TCP_UDP		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_0_HDR_TYPE_ICMPV6		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_0_COMP				 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_EQUAL			 (0L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_NEQUAL			 (1L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_GREATER			 (2L<<16)
+#define BNX2_RPM_RC_CNTL_0_COMP_LESS			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_0_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_0_SBIT				 (1L<<19)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL			 (0xfL<<20)
+#define BNX2_RPM_RC_CNTL_0_MAP				 (1L<<24)
+#define BNX2_RPM_RC_CNTL_0_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_0_DISCARD			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_0_MASK				 (1L<<26)
+#define BNX2_RPM_RC_CNTL_0_P1				 (1L<<27)
+#define BNX2_RPM_RC_CNTL_0_P2				 (1L<<28)
+#define BNX2_RPM_RC_CNTL_0_P3				 (1L<<29)
+#define BNX2_RPM_RC_CNTL_0_NBIT				 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_0			0x00001904
+#define BNX2_RPM_RC_VALUE_MASK_0_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_0_MASK			 (0xffffL<<16)
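+/*
+ * Each rule-checker rule is a CNTL/VALUE_MASK register pair.  As a
+ * hypothetical sketch (the field interpretation is assumed, not taken
+ * from the driver): to discard frames whose 16-bit word at byte
+ * offset 2 of the TCP header equals 0x1234, set OFFSET = 2,
+ * HDR_TYPE_TCP, COMP_EQUAL and DISCARD in BNX2_RPM_RC_CNTL_0, program
+ * BNX2_RPM_RC_VALUE_MASK_0 with VALUE = 0x1234 and MASK = 0xffff, then
+ * enable rule 0 through the RULE_ENABLE field of BNX2_RPM_RC_CONFIG.
+ */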
+
+#define BNX2_RPM_RC_CNTL_1				0x00001908
+#define BNX2_RPM_RC_CNTL_1_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_1_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_1_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_1_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_1_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_1_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_1_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_1_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_1_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_1_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_1_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_1_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_1_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_1_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_1_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_1_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_1_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_1_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_1			0x0000190c
+#define BNX2_RPM_RC_VALUE_MASK_1_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_1_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_2				0x00001910
+#define BNX2_RPM_RC_CNTL_2_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_2_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_2_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_2_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_2_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_2_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_2_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_2_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_2_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_2_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_2_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_2_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_2_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_2_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_2_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_2_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_2_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_2_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_2			0x00001914
+#define BNX2_RPM_RC_VALUE_MASK_2_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_2_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_3				0x00001918
+#define BNX2_RPM_RC_CNTL_3_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_3_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_3_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_3_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_3_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_3_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_3_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_3_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_3_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_3_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_3_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_3_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_3_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_3_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_3_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_3_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_3_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_3_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_3			0x0000191c
+#define BNX2_RPM_RC_VALUE_MASK_3_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_3_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_4				0x00001920
+#define BNX2_RPM_RC_CNTL_4_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_4_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_4_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_4_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_4_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_4_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_4_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_4_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_4_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_4_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_4_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_4_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_4_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_4_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_4_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_4_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_4_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_4_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_4			0x00001924
+#define BNX2_RPM_RC_VALUE_MASK_4_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_4_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_5				0x00001928
+#define BNX2_RPM_RC_CNTL_5_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_5_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_5_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_5_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_5_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_5_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_5_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_5_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_5_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_5_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_5_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_5_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_5_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_5_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_5_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_5_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_5_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_5_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_5			0x0000192c
+#define BNX2_RPM_RC_VALUE_MASK_5_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_5_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_6				0x00001930
+#define BNX2_RPM_RC_CNTL_6_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_6_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_6_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_6_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_6_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_6_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_6_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_6_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_6_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_6_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_6_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_6_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_6_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_6_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_6_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_6_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_6_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_6_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_6			0x00001934
+#define BNX2_RPM_RC_VALUE_MASK_6_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_6_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_7				0x00001938
+#define BNX2_RPM_RC_CNTL_7_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_7_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_7_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_7_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_7_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_7_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_7_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_7_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_7_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_7_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_7_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_7_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_7_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_7_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_7_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_7_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_7_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_7_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_7			0x0000193c
+#define BNX2_RPM_RC_VALUE_MASK_7_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_7_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_8				0x00001940
+#define BNX2_RPM_RC_CNTL_8_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_8_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_8_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_8_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_8_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_8_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_8_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_8_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_8_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_8_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_8_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_8_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_8_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_8_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_8_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_8_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_8_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_8_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_8			0x00001944
+#define BNX2_RPM_RC_VALUE_MASK_8_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_8_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_9				0x00001948
+#define BNX2_RPM_RC_CNTL_9_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_9_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_9_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_9_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_9_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_9_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_9_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_9_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_9_COMP_LESS_XI			 (3L<<16)
+#define BNX2_RPM_RC_CNTL_9_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_9_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_9_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_9_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_9_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_9_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_9_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_9_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_9_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_9			0x0000194c
+#define BNX2_RPM_RC_VALUE_MASK_9_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_9_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_10				0x00001950
+#define BNX2_RPM_RC_CNTL_10_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_10_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_10_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_10_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_10_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_10_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_10_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_10_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_10_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_10_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_10_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_10_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_10_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_10_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_10_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_10_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_10_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_10_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_10			0x00001954
+#define BNX2_RPM_RC_VALUE_MASK_10_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_10_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_11				0x00001958
+#define BNX2_RPM_RC_CNTL_11_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_11_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_11_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_11_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_11_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_11_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_11_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_11_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_11_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_11_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_11_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_11_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_11_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_11_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_11_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_11_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_11_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_11_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_11			0x0000195c
+#define BNX2_RPM_RC_VALUE_MASK_11_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_11_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_12				0x00001960
+#define BNX2_RPM_RC_CNTL_12_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_12_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_12_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_12_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_12_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_12_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_12_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_12_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_12_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_12_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_12_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_12_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_12_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_12_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_12_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_12_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_12_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_12_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_12			0x00001964
+#define BNX2_RPM_RC_VALUE_MASK_12_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_12_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_13				0x00001968
+#define BNX2_RPM_RC_CNTL_13_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_13_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_13_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_13_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_13_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_13_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_13_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_13_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_13_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_13_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_13_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_13_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_13_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_13_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_13_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_13_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_13_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_13_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_13			0x0000196c
+#define BNX2_RPM_RC_VALUE_MASK_13_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_13_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_14				0x00001970
+#define BNX2_RPM_RC_CNTL_14_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_14_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_14_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_14_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_14_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_14_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_14_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_14_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_14_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_14_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_14_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_14_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_14_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_14_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_14_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_14_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_14_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_14_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_14			0x00001974
+#define BNX2_RPM_RC_VALUE_MASK_14_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_14_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CNTL_15				0x00001978
+#define BNX2_RPM_RC_CNTL_15_A				 (0x3ffffL<<0)
+#define BNX2_RPM_RC_CNTL_15_B				 (0xfffL<<19)
+#define BNX2_RPM_RC_CNTL_15_OFFSET_XI			 (0xffL<<0)
+#define BNX2_RPM_RC_CNTL_15_CLASS_XI			 (0x7L<<8)
+#define BNX2_RPM_RC_CNTL_15_PRIORITY_XI			 (1L<<11)
+#define BNX2_RPM_RC_CNTL_15_P4_XI			 (1L<<12)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_XI			 (0x7L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_START_XI		 (0L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_IP_XI		 (1L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_XI		 (2L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_UDP_XI		 (3L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_DATA_XI		 (4L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_TCP_UDP_XI		 (5L<<13)
+#define BNX2_RPM_RC_CNTL_15_HDR_TYPE_ICMPV6_XI		 (6L<<13)
+#define BNX2_RPM_RC_CNTL_15_COMP_XI			 (0x3L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_EQUAL_XI		 (0L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_NEQUAL_XI		 (1L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_GREATER_XI		 (2L<<16)
+#define BNX2_RPM_RC_CNTL_15_COMP_LESS_XI		 (3L<<16)
+#define BNX2_RPM_RC_CNTL_15_MAP_XI			 (1L<<18)
+#define BNX2_RPM_RC_CNTL_15_SBIT_XI			 (1L<<19)
+#define BNX2_RPM_RC_CNTL_15_CMDSEL_XI			 (0x1fL<<20)
+#define BNX2_RPM_RC_CNTL_15_DISCARD_XI			 (1L<<25)
+#define BNX2_RPM_RC_CNTL_15_MASK_XI			 (1L<<26)
+#define BNX2_RPM_RC_CNTL_15_P1_XI			 (1L<<27)
+#define BNX2_RPM_RC_CNTL_15_P2_XI			 (1L<<28)
+#define BNX2_RPM_RC_CNTL_15_P3_XI			 (1L<<29)
+#define BNX2_RPM_RC_CNTL_15_NBIT_XI			 (1L<<30)
+
+#define BNX2_RPM_RC_VALUE_MASK_15			0x0000197c
+#define BNX2_RPM_RC_VALUE_MASK_15_VALUE			 (0xffffL<<0)
+#define BNX2_RPM_RC_VALUE_MASK_15_MASK			 (0xffffL<<16)
+
+#define BNX2_RPM_RC_CONFIG				0x00001980
+#define BNX2_RPM_RC_CONFIG_RULE_ENABLE			 (0xffffL<<0)
+#define BNX2_RPM_RC_CONFIG_RULE_ENABLE_XI		 (0xfffffL<<0)
+#define BNX2_RPM_RC_CONFIG_DEF_CLASS			 (0x7L<<24)
+#define BNX2_RPM_RC_CONFIG_KNUM_OVERWRITE		 (1L<<31)
+
+#define BNX2_RPM_DEBUG0					0x00001984
+#define BNX2_RPM_DEBUG0_FM_BCNT				 (0xffffL<<0)
+#define BNX2_RPM_DEBUG0_T_DATA_OFST_VLD			 (1L<<16)
+#define BNX2_RPM_DEBUG0_T_UDP_OFST_VLD			 (1L<<17)
+#define BNX2_RPM_DEBUG0_T_TCP_OFST_VLD			 (1L<<18)
+#define BNX2_RPM_DEBUG0_T_IP_OFST_VLD			 (1L<<19)
+#define BNX2_RPM_DEBUG0_IP_MORE_FRGMT			 (1L<<20)
+#define BNX2_RPM_DEBUG0_T_IP_NO_TCP_UDP_HDR		 (1L<<21)
+#define BNX2_RPM_DEBUG0_LLC_SNAP			 (1L<<22)
+#define BNX2_RPM_DEBUG0_FM_STARTED			 (1L<<23)
+#define BNX2_RPM_DEBUG0_DONE				 (1L<<24)
+#define BNX2_RPM_DEBUG0_WAIT_4_DONE			 (1L<<25)
+#define BNX2_RPM_DEBUG0_USE_TPBUF_CKSUM			 (1L<<26)
+#define BNX2_RPM_DEBUG0_RX_NO_PSD_HDR_CKSUM		 (1L<<27)
+#define BNX2_RPM_DEBUG0_IGNORE_VLAN			 (1L<<28)
+#define BNX2_RPM_DEBUG0_RP_ENA_ACTIVE			 (1L<<31)
+
+#define BNX2_RPM_DEBUG1					0x00001988
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST			 (0xffffL<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IDLE			 (0L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_ALL		 (1L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IPLLC	 (2L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B6_IP		 (4L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ETYPE_B2_IP		 (8L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP_START		 (16L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_IP			 (32L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_TCP			 (64L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_UDP			 (128L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_AH			 (256L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP			 (512L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ESP_PAYLOAD		 (1024L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_DATA			 (2048L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRY		 (0x2000L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_ADD_CARRYOUT		 (0x4000L<<0)
+#define BNX2_RPM_DEBUG1_FSM_CUR_ST_LATCH_RESULT		 (0x8000L<<0)
+#define BNX2_RPM_DEBUG1_HDR_BCNT			 (0x7ffL<<16)
+#define BNX2_RPM_DEBUG1_UNKNOWN_ETYPE_D			 (1L<<28)
+#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D2			 (1L<<29)
+#define BNX2_RPM_DEBUG1_VLAN_REMOVED_D1			 (1L<<30)
+#define BNX2_RPM_DEBUG1_EOF_0XTRA_WD			 (1L<<31)
+
+#define BNX2_RPM_DEBUG2					0x0000198c
+#define BNX2_RPM_DEBUG2_CMD_HIT_VEC			 (0xffffL<<0)
+#define BNX2_RPM_DEBUG2_IP_BCNT				 (0xffL<<16)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M4			 (1L<<24)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M3			 (1L<<25)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M2			 (1L<<26)
+#define BNX2_RPM_DEBUG2_THIS_CMD_M1			 (1L<<27)
+#define BNX2_RPM_DEBUG2_IPIPE_EMPTY			 (1L<<28)
+#define BNX2_RPM_DEBUG2_FM_DISCARD			 (1L<<29)
+#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D2		 (1L<<30)
+#define BNX2_RPM_DEBUG2_LAST_RULE_IN_FM_D1		 (1L<<31)
+
+#define BNX2_RPM_DEBUG3					0x00001990
+#define BNX2_RPM_DEBUG3_AVAIL_MBUF_PTR			 (0x1ffL<<0)
+#define BNX2_RPM_DEBUG3_RDE_RLUPQ_WR_REQ_INT		 (1L<<9)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_LAST_INT		 (1L<<10)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_WR_REQ_INT		 (1L<<11)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_FREE_REQ		 (1L<<12)
+#define BNX2_RPM_DEBUG3_RDE_RBUF_ALLOC_REQ		 (1L<<13)
+#define BNX2_RPM_DEBUG3_DFSM_MBUF_NOTAVAIL		 (1L<<14)
+#define BNX2_RPM_DEBUG3_RBUF_RDE_SOF_DROP		 (1L<<15)
+#define BNX2_RPM_DEBUG3_DFIFO_VLD_ENTRY_CT		 (0xfL<<16)
+#define BNX2_RPM_DEBUG3_RDE_SRC_FIFO_ALMFULL		 (1L<<21)
+#define BNX2_RPM_DEBUG3_DROP_NXT_VLD			 (1L<<22)
+#define BNX2_RPM_DEBUG3_DROP_NXT			 (1L<<23)
+#define BNX2_RPM_DEBUG3_FTQ_FSM				 (0x3L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_IDLE			 (0x0L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_ACK		 (0x1L<<24)
+#define BNX2_RPM_DEBUG3_FTQ_FSM_WAIT_FREE		 (0x2L<<24)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM			 (0x3L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_SOF		 (0x0L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_GET_MBUF		 (0x1L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DMA_DATA		 (0x2L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DATA		 (0x3L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_EOF		 (0x4L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_MF_ACK		 (0x5L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_WAIT_DROP_NXT_VLD	 (0x6L<<26)
+#define BNX2_RPM_DEBUG3_MBWRITE_FSM_DONE		 (0x7L<<26)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM			 (1L<<29)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM_IDLE			 (0L<<29)
+#define BNX2_RPM_DEBUG3_MBFREE_FSM_WAIT_ACK		 (1L<<29)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM			 (1L<<30)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM_ET_MBUF		 (0x0L<<30)
+#define BNX2_RPM_DEBUG3_MBALLOC_FSM_IVE_MBUF		 (0x1L<<30)
+#define BNX2_RPM_DEBUG3_CCODE_EOF_ERROR			 (1L<<31)
+
+#define BNX2_RPM_DEBUG4					0x00001994
+#define BNX2_RPM_DEBUG4_DFSM_MBUF_CLUSTER		 (0x1ffffffL<<0)
+#define BNX2_RPM_DEBUG4_DFIFO_CUR_CCODE			 (0x7L<<25)
+#define BNX2_RPM_DEBUG4_MBWRITE_FSM			 (0x7L<<28)
+#define BNX2_RPM_DEBUG4_DFIFO_EMPTY			 (1L<<31)
+
+#define BNX2_RPM_DEBUG5					0x00001998
+#define BNX2_RPM_DEBUG5_RDROP_WPTR			 (0x1fL<<0)
+#define BNX2_RPM_DEBUG5_RDROP_ACPI_RPTR			 (0x1fL<<5)
+#define BNX2_RPM_DEBUG5_RDROP_MC_RPTR			 (0x1fL<<10)
+#define BNX2_RPM_DEBUG5_RDROP_RC_RPTR			 (0x1fL<<15)
+#define BNX2_RPM_DEBUG5_RDROP_ACPI_EMPTY		 (1L<<20)
+#define BNX2_RPM_DEBUG5_RDROP_MC_EMPTY			 (1L<<21)
+#define BNX2_RPM_DEBUG5_RDROP_AEOF_VEC_AT_RDROP_MC_RPTR	 (1L<<22)
+#define BNX2_RPM_DEBUG5_HOLDREG_WOL_DROP_INT		 (1L<<23)
+#define BNX2_RPM_DEBUG5_HOLDREG_DISCARD			 (1L<<24)
+#define BNX2_RPM_DEBUG5_HOLDREG_MBUF_NOTAVAIL		 (1L<<25)
+#define BNX2_RPM_DEBUG5_HOLDREG_MC_EMPTY		 (1L<<26)
+#define BNX2_RPM_DEBUG5_HOLDREG_RC_EMPTY		 (1L<<27)
+#define BNX2_RPM_DEBUG5_HOLDREG_FC_EMPTY		 (1L<<28)
+#define BNX2_RPM_DEBUG5_HOLDREG_ACPI_EMPTY		 (1L<<29)
+#define BNX2_RPM_DEBUG5_HOLDREG_FULL_T			 (1L<<30)
+#define BNX2_RPM_DEBUG5_HOLDREG_RD			 (1L<<31)
+
+#define BNX2_RPM_DEBUG6					0x0000199c
+#define BNX2_RPM_DEBUG6_ACPI_VEC			 (0xffffL<<0)
+#define BNX2_RPM_DEBUG6_VEC				 (0xffffL<<16)
+
+#define BNX2_RPM_DEBUG7					0x000019a0
+#define BNX2_RPM_DEBUG7_RPM_DBG7_LAST_CRC		 (0xffffffffL<<0)
+
+#define BNX2_RPM_DEBUG8					0x000019a4
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM			 (0xfL<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_IDLE		 (0L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W1_ADDR		 (1L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W2_ADDR		 (2L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_W3_ADDR		 (3L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_SOF_WAIT_THBUF	 (4L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_DATA		 (5L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W0_ADDR		 (6L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W1_ADDR		 (7L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W2_ADDR		 (8L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_W3_ADDR		 (9L<<0)
+#define BNX2_RPM_DEBUG8_PS_ACPI_FSM_WAIT_THBUF		 (10L<<0)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_W0			 (1L<<4)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_W3_DATA		 (1L<<5)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_WAIT		 (1L<<6)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W3		 (1L<<7)
+#define BNX2_RPM_DEBUG8_COMPARE_AT_SOF_W2		 (1L<<8)
+#define BNX2_RPM_DEBUG8_EOF_W_LTEQ6_VLDBYTES		 (1L<<9)
+#define BNX2_RPM_DEBUG8_EOF_W_LTEQ4_VLDBYTES		 (1L<<10)
+#define BNX2_RPM_DEBUG8_NXT_EOF_W_12_VLDBYTES		 (1L<<11)
+#define BNX2_RPM_DEBUG8_EOF_DET				 (1L<<12)
+#define BNX2_RPM_DEBUG8_SOF_DET				 (1L<<13)
+#define BNX2_RPM_DEBUG8_WAIT_4_SOF			 (1L<<14)
+#define BNX2_RPM_DEBUG8_ALL_DONE			 (1L<<15)
+#define BNX2_RPM_DEBUG8_THBUF_ADDR			 (0x7fL<<16)
+#define BNX2_RPM_DEBUG8_BYTE_CTR			 (0xffL<<24)
+
+#define BNX2_RPM_DEBUG9					0x000019a8
+#define BNX2_RPM_DEBUG9_OUTFIFO_COUNT			 (0x7L<<0)
+#define BNX2_RPM_DEBUG9_RDE_ACPI_RDY			 (1L<<3)
+#define BNX2_RPM_DEBUG9_VLD_RD_ENTRY_CT			 (0x7L<<4)
+#define BNX2_RPM_DEBUG9_OUTFIFO_OVERRUN_OCCURRED	 (1L<<28)
+#define BNX2_RPM_DEBUG9_INFIFO_OVERRUN_OCCURRED		 (1L<<29)
+#define BNX2_RPM_DEBUG9_ACPI_MATCH_INT			 (1L<<30)
+#define BNX2_RPM_DEBUG9_ACPI_ENABLE_SYN			 (1L<<31)
+#define BNX2_RPM_DEBUG9_BEMEM_R_XI			 (0x1fL<<0)
+#define BNX2_RPM_DEBUG9_EO_XI				 (1L<<5)
+#define BNX2_RPM_DEBUG9_AEOF_DE_XI			 (1L<<6)
+#define BNX2_RPM_DEBUG9_SO_XI				 (1L<<7)
+#define BNX2_RPM_DEBUG9_WD64_CT_XI			 (0x1fL<<8)
+#define BNX2_RPM_DEBUG9_EOF_VLDBYTE_XI			 (0x7L<<13)
+#define BNX2_RPM_DEBUG9_ACPI_RDE_PAT_ID_XI		 (0xfL<<16)
+#define BNX2_RPM_DEBUG9_CALCRC_RESULT_XI		 (0x3ffL<<20)
+#define BNX2_RPM_DEBUG9_DATA_IN_VL_XI			 (1L<<30)
+#define BNX2_RPM_DEBUG9_CALCRC_BUFFER_VLD_XI		 (1L<<31)
+
+#define BNX2_RPM_ACPI_DBG_BUF_W00			0x000019c0
+#define BNX2_RPM_ACPI_DBG_BUF_W01			0x000019c4
+#define BNX2_RPM_ACPI_DBG_BUF_W02			0x000019c8
+#define BNX2_RPM_ACPI_DBG_BUF_W03			0x000019cc
+#define BNX2_RPM_ACPI_DBG_BUF_W10			0x000019d0
+#define BNX2_RPM_ACPI_DBG_BUF_W11			0x000019d4
+#define BNX2_RPM_ACPI_DBG_BUF_W12			0x000019d8
+#define BNX2_RPM_ACPI_DBG_BUF_W13			0x000019dc
+#define BNX2_RPM_ACPI_DBG_BUF_W20			0x000019e0
+#define BNX2_RPM_ACPI_DBG_BUF_W21			0x000019e4
+#define BNX2_RPM_ACPI_DBG_BUF_W22			0x000019e8
+#define BNX2_RPM_ACPI_DBG_BUF_W23			0x000019ec
+#define BNX2_RPM_ACPI_DBG_BUF_W30			0x000019f0
+#define BNX2_RPM_ACPI_DBG_BUF_W31			0x000019f4
+#define BNX2_RPM_ACPI_DBG_BUF_W32			0x000019f8
+#define BNX2_RPM_ACPI_DBG_BUF_W33			0x000019fc
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL			0x00001a00
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_BYTE_ADDRESS	 (0xffffL<<0)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_DEBUGRD		 (1L<<28)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_MODE		 (1L<<29)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_INIT		 (1L<<30)
+#define BNX2_RPM_ACPI_BYTE_ENABLE_CTRL_WR		 (1L<<31)
+
+#define BNX2_RPM_ACPI_PATTERN_CTRL			0x00001a04
+#define BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID		 (0xfL<<0)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_CRC_SM_CLR		 (1L<<30)
+#define BNX2_RPM_ACPI_PATTERN_CTRL_WR			 (1L<<31)
+
+#define BNX2_RPM_ACPI_DATA				0x00001a08
+#define BNX2_RPM_ACPI_DATA_PATTERN_BE			 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN0			0x00001a0c
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN3		 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN2		 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN1		 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN0_PATTERN_LEN0		 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_LEN1			0x00001a10
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN7		 (0xffL<<0)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN6		 (0xffL<<8)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN5		 (0xffL<<16)
+#define BNX2_RPM_ACPI_PATTERN_LEN1_PATTERN_LEN4		 (0xffL<<24)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC0			0x00001a18
+#define BNX2_RPM_ACPI_PATTERN_CRC0_PATTERN_CRC0		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC1			0x00001a1c
+#define BNX2_RPM_ACPI_PATTERN_CRC1_PATTERN_CRC1		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC2			0x00001a20
+#define BNX2_RPM_ACPI_PATTERN_CRC2_PATTERN_CRC2		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC3			0x00001a24
+#define BNX2_RPM_ACPI_PATTERN_CRC3_PATTERN_CRC3		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC4			0x00001a28
+#define BNX2_RPM_ACPI_PATTERN_CRC4_PATTERN_CRC4		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC5			0x00001a2c
+#define BNX2_RPM_ACPI_PATTERN_CRC5_PATTERN_CRC5		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC6			0x00001a30
+#define BNX2_RPM_ACPI_PATTERN_CRC6_PATTERN_CRC6		 (0xffffffffL<<0)
+
+#define BNX2_RPM_ACPI_PATTERN_CRC7			0x00001a34
+#define BNX2_RPM_ACPI_PATTERN_CRC7_PATTERN_CRC7		 (0xffffffffL<<0)
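+/*
+ * Presumed flow for loading a wake-on-LAN pattern (a sketch, not lifted
+ * from the driver): select the pattern with
+ * BNX2_RPM_ACPI_PATTERN_CTRL_PATTERN_ID, load the per-byte enables
+ * through BNX2_RPM_ACPI_BYTE_ENABLE_CTRL and BNX2_RPM_ACPI_DATA, record
+ * the pattern length in the PATTERN_LEN0/1 fields, and store the
+ * expected CRC of the enabled bytes in the matching PATTERN_CRCn
+ * register.
+ */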
+
+
+/*
+ *  rlup_reg definition
+ *  offset: 0x2000
+ */
+#define BNX2_RLUP_RSS_CONFIG				0x0000201c
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_XI		 (0x3L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_OFF_XI	 (0L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI	 (1L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_IP_ONLY_XI	 (2L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_RES_XI	 (3L<<0)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_XI		 (0x3L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_OFF_XI	 (0L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI	 (1L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_IP_ONLY_XI	 (2L<<2)
+#define BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_RES_XI	 (3L<<2)
+
+#define BNX2_RLUP_RSS_COMMAND				0x00002048
+#define BNX2_RLUP_RSS_COMMAND_RSS_IND_TABLE_ADDR	 (0xfUL<<0)
+#define BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK		 (0xffUL<<4)
+#define BNX2_RLUP_RSS_COMMAND_WRITE			 (1UL<<12)
+#define BNX2_RLUP_RSS_COMMAND_READ			 (1UL<<13)
+#define BNX2_RLUP_RSS_COMMAND_HASH_MASK			 (0x7UL<<14)
+
+#define BNX2_RLUP_RSS_DATA				0x0000204c
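+/*
+ * The RSS indirection table is accessed indirectly through the two
+ * registers above.  A presumed write sequence (REG_WR() again stands in
+ * for the driver's register accessor; addr is the 4-bit table address):
+ *
+ *	REG_WR(bp, BNX2_RLUP_RSS_DATA, entries);
+ *	REG_WR(bp, BNX2_RLUP_RSS_COMMAND, addr |
+ *	       BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
+ *	       BNX2_RLUP_RSS_COMMAND_WRITE |
+ *	       BNX2_RLUP_RSS_COMMAND_HASH_MASK);
+ */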
+
+
+/*
+ *  rbuf_reg definition
+ *  offset: 0x200000
+ */
+#define BNX2_RBUF_COMMAND				0x00200000
+#define BNX2_RBUF_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_RBUF_COMMAND_FREE_INIT			 (1L<<1)
+#define BNX2_RBUF_COMMAND_RAM_INIT			 (1L<<2)
+#define BNX2_RBUF_COMMAND_PKT_OFFSET_OVFL		 (1L<<3)
+#define BNX2_RBUF_COMMAND_OVER_FREE			 (1L<<4)
+#define BNX2_RBUF_COMMAND_ALLOC_REQ			 (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHNGE_TE		 (1L<<6)
+#define BNX2_RBUF_COMMAND_CU_ISOLATE_XI			 (1L<<5)
+#define BNX2_RBUF_COMMAND_EN_PRI_CHANGE_XI		 (1L<<6)
+#define BNX2_RBUF_COMMAND_GRC_ENDIAN_CONV_DIS_XI	 (1L<<7)
+
+#define BNX2_RBUF_STATUS1				0x00200004
+#define BNX2_RBUF_STATUS1_FREE_COUNT			 (0x3ffL<<0)
+
+#define BNX2_RBUF_STATUS2				0x00200008
+#define BNX2_RBUF_STATUS2_FREE_TAIL			 (0x1ffL<<0)
+#define BNX2_RBUF_STATUS2_FREE_HEAD			 (0x1ffL<<16)
+
+#define BNX2_RBUF_CONFIG				0x0020000c
+#define BNX2_RBUF_CONFIG_XOFF_TRIP			 (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 31 / 1000) + 54)
+#define BNX2_RBUF_CONFIG_XON_TRIP			 (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 39 / 1000) + 66)
+#define BNX2_RBUF_CONFIG_VAL(mtu)			 \
+	(BNX2_RBUF_CONFIG_XOFF_TRIP_VAL(mtu) |		 \
+	(BNX2_RBUF_CONFIG_XON_TRIP_VAL(mtu) << 16))
+
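+/*
+ * The trip macros use C integer division.  For the default 1500-byte
+ * MTU they reduce to their constant terms: XOFF_TRIP_VAL(1500) = 54 and
+ * XON_TRIP_VAL(1500) = 66, so BNX2_RBUF_CONFIG_VAL(1500) = 0x00420036.
+ * For a 9000-byte jumbo MTU, XOFF_TRIP_VAL(9000) =
+ * (7500 * 31) / 1000 + 54 = 286.
+ */
+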
+#define BNX2_RBUF_FW_BUF_ALLOC				0x00200010
+#define BNX2_RBUF_FW_BUF_ALLOC_VALUE			 (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_ALLOC_TYPE			 (1L<<16)
+#define BNX2_RBUF_FW_BUF_ALLOC_ALLOC_REQ		 (1L<<31)
+
+#define BNX2_RBUF_FW_BUF_FREE				0x00200014
+#define BNX2_RBUF_FW_BUF_FREE_COUNT			 (0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_FREE_TAIL			 (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_FREE_HEAD			 (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_FREE_TYPE			 (1L<<25)
+#define BNX2_RBUF_FW_BUF_FREE_FREE_REQ			 (1L<<31)
+
+#define BNX2_RBUF_FW_BUF_SEL				0x00200018
+#define BNX2_RBUF_FW_BUF_SEL_COUNT			 (0x7fL<<0)
+#define BNX2_RBUF_FW_BUF_SEL_TAIL			 (0x1ffL<<7)
+#define BNX2_RBUF_FW_BUF_SEL_HEAD			 (0x1ffL<<16)
+#define BNX2_RBUF_FW_BUF_SEL_SEL_REQ			 (1L<<31)
+
+#define BNX2_RBUF_CONFIG2				0x0020001c
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP			 (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu)	 \
+	((((mtu) - 1500) * 4 / 1000) + 5)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP			 (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu)	 \
+	((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG2_VAL(mtu)			 \
+	(BNX2_RBUF_CONFIG2_MAC_DROP_TRIP_VAL(mtu) |	 \
+	(BNX2_RBUF_CONFIG2_MAC_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_CONFIG3				0x00200020
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP			 (0x3ffL<<0)
+#define BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 12 / 1000) + 18)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP			 (0x3ffL<<16)
+#define BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu)		 \
+	((((mtu) - 1500) * 2 / 100) + 30)
+#define BNX2_RBUF_CONFIG3_VAL(mtu)			 \
+	(BNX2_RBUF_CONFIG3_CU_DROP_TRIP_VAL(mtu) |	 \
+	(BNX2_RBUF_CONFIG3_CU_KEEP_TRIP_VAL(mtu) << 16))
+
+#define BNX2_RBUF_PKT_DATA				0x00208000
+#define BNX2_RBUF_CLIST_DATA				0x00210000
+#define BNX2_RBUF_BUF_DATA				0x00220000
+
+
+/*
+ *  rv2p_reg definition
+ *  offset: 0x2800
+ */
+#define BNX2_RV2P_COMMAND				0x00002800
+#define BNX2_RV2P_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_RV2P_COMMAND_PROC1_INTRPT			 (1L<<1)
+#define BNX2_RV2P_COMMAND_PROC2_INTRPT			 (1L<<2)
+#define BNX2_RV2P_COMMAND_ABORT0			 (1L<<4)
+#define BNX2_RV2P_COMMAND_ABORT1			 (1L<<5)
+#define BNX2_RV2P_COMMAND_ABORT2			 (1L<<6)
+#define BNX2_RV2P_COMMAND_ABORT3			 (1L<<7)
+#define BNX2_RV2P_COMMAND_ABORT4			 (1L<<8)
+#define BNX2_RV2P_COMMAND_ABORT5			 (1L<<9)
+#define BNX2_RV2P_COMMAND_PROC1_RESET			 (1L<<16)
+#define BNX2_RV2P_COMMAND_PROC2_RESET			 (1L<<17)
+#define BNX2_RV2P_COMMAND_CTXIF_RESET			 (1L<<18)
+
+#define BNX2_RV2P_STATUS				0x00002804
+#define BNX2_RV2P_STATUS_ALWAYS_0			 (1L<<0)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT0_CNT		 (1L<<8)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT1_CNT		 (1L<<9)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT2_CNT		 (1L<<10)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT3_CNT		 (1L<<11)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT4_CNT		 (1L<<12)
+#define BNX2_RV2P_STATUS_RV2P_GEN_STAT5_CNT		 (1L<<13)
+
+#define BNX2_RV2P_CONFIG				0x00002808
+#define BNX2_RV2P_CONFIG_STALL_PROC1			 (1L<<0)
+#define BNX2_RV2P_CONFIG_STALL_PROC2			 (1L<<1)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT0		 (1L<<8)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT1		 (1L<<9)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT2		 (1L<<10)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT3		 (1L<<11)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT4		 (1L<<12)
+#define BNX2_RV2P_CONFIG_PROC1_STALL_ON_ABORT5		 (1L<<13)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT0		 (1L<<16)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT1		 (1L<<17)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT2		 (1L<<18)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT3		 (1L<<19)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT4		 (1L<<20)
+#define BNX2_RV2P_CONFIG_PROC2_STALL_ON_ABORT5		 (1L<<21)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE			 (0xfL<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256			 (0L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512			 (1L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1K			 (2L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_2K			 (3L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_4K			 (4L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_8K			 (5L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_16K			 (6L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_32K			 (7L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_64K			 (8L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_128K			 (9L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_256K			 (10L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_512K			 (11L<<24)
+#define BNX2_RV2P_CONFIG_PAGE_SIZE_1M			 (12L<<24)
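+
+/*
+ * The PAGE_SIZE field appears to encode log2(size / 256), i.e. 256B -> 0
+ * through 1M -> 12; the TBDR_CONFIG page-size field further down uses
+ * the same encoding.
+ */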
+
+#define BNX2_RV2P_GEN_BFR_ADDR_0			0x00002810
+#define BNX2_RV2P_GEN_BFR_ADDR_0_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_1			0x00002814
+#define BNX2_RV2P_GEN_BFR_ADDR_1_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_2			0x00002818
+#define BNX2_RV2P_GEN_BFR_ADDR_2_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_GEN_BFR_ADDR_3			0x0000281c
+#define BNX2_RV2P_GEN_BFR_ADDR_3_VALUE			 (0xffffL<<16)
+
+#define BNX2_RV2P_INSTR_HIGH				0x00002830
+#define BNX2_RV2P_INSTR_HIGH_HIGH			 (0x1fL<<0)
+
+#define BNX2_RV2P_INSTR_LOW				0x00002834
+#define BNX2_RV2P_INSTR_LOW_LOW				 (0xffffffffL<<0)
+
+#define BNX2_RV2P_PROC1_ADDR_CMD			0x00002838
+#define BNX2_RV2P_PROC1_ADDR_CMD_ADD			 (0x3ffL<<0)
+#define BNX2_RV2P_PROC1_ADDR_CMD_RDWR			 (1L<<31)
+
+#define BNX2_RV2P_PROC2_ADDR_CMD			0x0000283c
+#define BNX2_RV2P_PROC2_ADDR_CMD_ADD			 (0x3ffL<<0)
+#define BNX2_RV2P_PROC2_ADDR_CMD_RDWR			 (1L<<31)
+
+#define BNX2_RV2P_PROC1_GRC_DEBUG			0x00002840
+#define BNX2_RV2P_PROC2_GRC_DEBUG			0x00002844
+#define BNX2_RV2P_GRC_PROC_DEBUG			0x00002848
+#define BNX2_RV2P_DEBUG_VECT_PEEK			0x0000284c
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_RV2P_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_RV2P_MPFE_PFE_CTL				0x00002afc
+#define BNX2_RV2P_MPFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE			 (0xfL<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_RV2P_MPFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_RV2P_MPFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_RV2P_RV2PPQ				0x00002b40
+#define BNX2_RV2P_PFTQ_CMD				0x00002b78
+#define BNX2_RV2P_PFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RV2P_PFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RV2P_PFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RV2P_PFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RV2P_PFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RV2P_PFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RV2P_PFTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_RV2P_PFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RV2P_PFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RV2P_PFTQ_CTL				0x00002b7c
+#define BNX2_RV2P_PFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RV2P_PFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RV2P_PFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RV2P_PFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RV2P_PFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RV2P_RV2PTQ				0x00002b80
+#define BNX2_RV2P_TFTQ_CMD				0x00002bb8
+#define BNX2_RV2P_TFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RV2P_TFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RV2P_TFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RV2P_TFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RV2P_TFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RV2P_TFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RV2P_TFTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_RV2P_TFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RV2P_TFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RV2P_TFTQ_CTL				0x00002bbc
+#define BNX2_RV2P_TFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RV2P_TFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RV2P_TFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RV2P_TFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RV2P_TFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RV2P_RV2PMQ				0x00002bc0
+#define BNX2_RV2P_MFTQ_CMD				0x00002bf8
+#define BNX2_RV2P_MFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RV2P_MFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RV2P_MFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RV2P_MFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RV2P_MFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RV2P_MFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RV2P_MFTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_RV2P_MFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RV2P_MFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RV2P_MFTQ_CTL				0x00002bfc
+#define BNX2_RV2P_MFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RV2P_MFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RV2P_MFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RV2P_MFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RV2P_MFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+
+
+/*
+ *  mq_reg definition
+ *  offset: 0x3c00
+ */
+#define BNX2_MQ_COMMAND					0x00003c00
+#define BNX2_MQ_COMMAND_ENABLED				 (1L<<0)
+#define BNX2_MQ_COMMAND_INIT				 (1L<<1)
+#define BNX2_MQ_COMMAND_OVERFLOW			 (1L<<4)
+#define BNX2_MQ_COMMAND_WR_ERROR			 (1L<<5)
+#define BNX2_MQ_COMMAND_RD_ERROR			 (1L<<6)
+#define BNX2_MQ_COMMAND_IDB_CFG_ERROR			 (1L<<7)
+#define BNX2_MQ_COMMAND_IDB_OVERFLOW			 (1L<<10)
+#define BNX2_MQ_COMMAND_NO_BIN_ERROR			 (1L<<11)
+#define BNX2_MQ_COMMAND_NO_MAP_ERROR			 (1L<<12)
+
+#define BNX2_MQ_STATUS					0x00003c04
+#define BNX2_MQ_STATUS_CTX_ACCESS_STAT			 (1L<<16)
+#define BNX2_MQ_STATUS_CTX_ACCESS64_STAT		 (1L<<17)
+#define BNX2_MQ_STATUS_PCI_STALL_STAT			 (1L<<18)
+#define BNX2_MQ_STATUS_IDB_OFLOW_STAT			 (1L<<19)
+
+#define BNX2_MQ_CONFIG					0x00003c08
+#define BNX2_MQ_CONFIG_TX_HIGH_PRI			 (1L<<0)
+#define BNX2_MQ_CONFIG_HALT_DIS				 (1L<<1)
+#define BNX2_MQ_CONFIG_BIN_MQ_MODE			 (1L<<2)
+#define BNX2_MQ_CONFIG_DIS_IDB_DROP			 (1L<<3)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE			 (0x7L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256		 (0L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_512		 (1L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_1K		 (2L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_2K		 (3L<<4)
+#define BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_4K		 (4L<<4)
+#define BNX2_MQ_CONFIG_MAX_DEPTH			 (0x7fL<<8)
+#define BNX2_MQ_CONFIG_CUR_DEPTH			 (0x7fL<<20)
+
+#define BNX2_MQ_ENQUEUE1				0x00003c0c
+#define BNX2_MQ_ENQUEUE1_OFFSET				 (0x3fL<<2)
+#define BNX2_MQ_ENQUEUE1_CID				 (0x3fffL<<8)
+#define BNX2_MQ_ENQUEUE1_BYTE_MASK			 (0xfL<<24)
+#define BNX2_MQ_ENQUEUE1_KNL_MODE			 (1L<<28)
+
+#define BNX2_MQ_ENQUEUE2				0x00003c10
+#define BNX2_MQ_BAD_WR_ADDR				0x00003c14
+#define BNX2_MQ_BAD_RD_ADDR				0x00003c18
+#define BNX2_MQ_KNL_BYP_WIND_START			0x00003c1c
+#define BNX2_MQ_KNL_BYP_WIND_START_VALUE		 (0xfffffL<<12)
+
+#define BNX2_MQ_KNL_WIND_END				0x00003c20
+#define BNX2_MQ_KNL_WIND_END_VALUE			 (0xffffffL<<8)
+
+#define BNX2_MQ_KNL_WRITE_MASK1				0x00003c24
+#define BNX2_MQ_KNL_TX_MASK1				0x00003c28
+#define BNX2_MQ_KNL_CMD_MASK1				0x00003c2c
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK1			0x00003c30
+#define BNX2_MQ_KNL_RX_V2P_MASK1			0x00003c34
+#define BNX2_MQ_KNL_WRITE_MASK2				0x00003c38
+#define BNX2_MQ_KNL_TX_MASK2				0x00003c3c
+#define BNX2_MQ_KNL_CMD_MASK2				0x00003c40
+#define BNX2_MQ_KNL_COND_ENQUEUE_MASK2			0x00003c44
+#define BNX2_MQ_KNL_RX_V2P_MASK2			0x00003c48
+#define BNX2_MQ_KNL_BYP_WRITE_MASK1			0x00003c4c
+#define BNX2_MQ_KNL_BYP_TX_MASK1			0x00003c50
+#define BNX2_MQ_KNL_BYP_CMD_MASK1			0x00003c54
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK1		0x00003c58
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK1			0x00003c5c
+#define BNX2_MQ_KNL_BYP_WRITE_MASK2			0x00003c60
+#define BNX2_MQ_KNL_BYP_TX_MASK2			0x00003c64
+#define BNX2_MQ_KNL_BYP_CMD_MASK2			0x00003c68
+#define BNX2_MQ_KNL_BYP_COND_ENQUEUE_MASK2		0x00003c6c
+#define BNX2_MQ_KNL_BYP_RX_V2P_MASK2			0x00003c70
+#define BNX2_MQ_MEM_WR_ADDR				0x00003c74
+#define BNX2_MQ_MEM_WR_ADDR_VALUE			 (0x3fL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA0				0x00003c78
+#define BNX2_MQ_MEM_WR_DATA0_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA1				0x00003c7c
+#define BNX2_MQ_MEM_WR_DATA1_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_WR_DATA2				0x00003c80
+#define BNX2_MQ_MEM_WR_DATA2_VALUE			 (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_WR_DATA2_VALUE_XI			 (0x7fffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_ADDR				0x00003c84
+#define BNX2_MQ_MEM_RD_ADDR_VALUE			 (0x3fL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA0				0x00003c88
+#define BNX2_MQ_MEM_RD_DATA0_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA1				0x00003c8c
+#define BNX2_MQ_MEM_RD_DATA1_VALUE			 (0xffffffffL<<0)
+
+#define BNX2_MQ_MEM_RD_DATA2				0x00003c90
+#define BNX2_MQ_MEM_RD_DATA2_VALUE			 (0x3fffffffL<<0)
+#define BNX2_MQ_MEM_RD_DATA2_VALUE_XI			 (0x7fffffffL<<0)
+
+#define BNX2_MQ_MAP_L2_3				0x00003d2c
+#define BNX2_MQ_MAP_L2_3_MQ_OFFSET			 (0xffL<<0)
+#define BNX2_MQ_MAP_L2_3_SZ				 (0x3L<<8)
+#define BNX2_MQ_MAP_L2_3_CTX_OFFSET			 (0x2ffL<<10)
+#define BNX2_MQ_MAP_L2_3_BIN_OFFSET			 (0x7L<<23)
+#define BNX2_MQ_MAP_L2_3_ARM				 (0x3L<<26)
+#define BNX2_MQ_MAP_L2_3_ENA				 (0x1L<<31)
+#define BNX2_MQ_MAP_L2_3_DEFAULT			 0x82004646
+
+#define BNX2_MQ_MAP_L2_5				0x00003d34
+#define BNX2_MQ_MAP_L2_5_ARM				 (0x3L<<26)
+
+/*
+ *  tsch_reg definition
+ *  offset: 0x4c00
+ */
+#define BNX2_TSCH_TSS_CFG				0x00004c1c
+#define BNX2_TSCH_TSS_CFG_TSS_START_CID			 (0x7ffL<<8)
+#define BNX2_TSCH_TSS_CFG_NUM_OF_TSS_CON		 (0xfL<<24)
+
+
+
+/*
+ *  tbdr_reg definition
+ *  offset: 0x5000
+ */
+#define BNX2_TBDR_COMMAND				0x00005000
+#define BNX2_TBDR_COMMAND_ENABLE			 (1L<<0)
+#define BNX2_TBDR_COMMAND_SOFT_RST			 (1L<<1)
+#define BNX2_TBDR_COMMAND_MSTR_ABORT			 (1L<<4)
+
+#define BNX2_TBDR_STATUS				0x00005004
+#define BNX2_TBDR_STATUS_DMA_WAIT			 (1L<<0)
+#define BNX2_TBDR_STATUS_FTQ_WAIT			 (1L<<1)
+#define BNX2_TBDR_STATUS_FIFO_OVERFLOW			 (1L<<2)
+#define BNX2_TBDR_STATUS_FIFO_UNDERFLOW			 (1L<<3)
+#define BNX2_TBDR_STATUS_SEARCHMISS_ERROR		 (1L<<4)
+#define BNX2_TBDR_STATUS_FTQ_ENTRY_CNT			 (1L<<5)
+#define BNX2_TBDR_STATUS_BURST_CNT			 (1L<<6)
+
+#define BNX2_TBDR_CONFIG				0x00005008
+#define BNX2_TBDR_CONFIG_MAX_BDS			 (0xffL<<0)
+#define BNX2_TBDR_CONFIG_SWAP_MODE			 (1L<<8)
+#define BNX2_TBDR_CONFIG_PRIORITY			 (1L<<9)
+#define BNX2_TBDR_CONFIG_CACHE_NEXT_PAGE_PTRS		 (1L<<10)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE			 (0xfL<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256			 (0L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512			 (1L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1K			 (2L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_2K			 (3L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_4K			 (4L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_8K			 (5L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_16K			 (6L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_32K			 (7L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_64K			 (8L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_128K			 (9L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_256K			 (10L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_512K			 (11L<<24)
+#define BNX2_TBDR_CONFIG_PAGE_SIZE_1M			 (12L<<24)
+
+#define BNX2_TBDR_DEBUG_VECT_PEEK			0x0000500c
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_TBDR_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_TBDR_CKSUM_ERROR_STATUS			0x00005010
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_TBDR_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TBDR_TBDRQ					0x000053c0
+#define BNX2_TBDR_FTQ_CMD				0x000053f8
+#define BNX2_TBDR_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TBDR_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TBDR_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TBDR_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TBDR_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TBDR_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TBDR_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TBDR_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TBDR_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TBDR_FTQ_CTL				0x000053fc
+#define BNX2_TBDR_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TBDR_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TBDR_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TBDR_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TBDR_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+
+
+/*
+ *  tdma_reg definition
+ *  offset: 0x5c00
+ */
+#define BNX2_TDMA_COMMAND				0x00005c00
+#define BNX2_TDMA_COMMAND_ENABLED			 (1L<<0)
+#define BNX2_TDMA_COMMAND_MASTER_ABORT			 (1L<<4)
+#define BNX2_TDMA_COMMAND_CS16_ERR			 (1L<<5)
+#define BNX2_TDMA_COMMAND_BAD_L2_LENGTH_ABORT		 (1L<<7)
+#define BNX2_TDMA_COMMAND_MASK_CS1			 (1L<<20)
+#define BNX2_TDMA_COMMAND_MASK_CS2			 (1L<<21)
+#define BNX2_TDMA_COMMAND_MASK_CS3			 (1L<<22)
+#define BNX2_TDMA_COMMAND_MASK_CS4			 (1L<<23)
+#define BNX2_TDMA_COMMAND_FORCE_ILOCK_CKERR		 (1L<<24)
+#define BNX2_TDMA_COMMAND_OFIFO_CLR			 (1L<<30)
+#define BNX2_TDMA_COMMAND_IFIFO_CLR			 (1L<<31)
+
+#define BNX2_TDMA_STATUS				0x00005c04
+#define BNX2_TDMA_STATUS_DMA_WAIT			 (1L<<0)
+#define BNX2_TDMA_STATUS_PAYLOAD_WAIT			 (1L<<1)
+#define BNX2_TDMA_STATUS_PATCH_FTQ_WAIT			 (1L<<2)
+#define BNX2_TDMA_STATUS_LOCK_WAIT			 (1L<<3)
+#define BNX2_TDMA_STATUS_FTQ_ENTRY_CNT			 (1L<<16)
+#define BNX2_TDMA_STATUS_BURST_CNT			 (1L<<17)
+#define BNX2_TDMA_STATUS_MAX_IFIFO_DEPTH		 (0x3fL<<20)
+#define BNX2_TDMA_STATUS_OFIFO_OVERFLOW			 (1L<<30)
+#define BNX2_TDMA_STATUS_IFIFO_OVERFLOW			 (1L<<31)
+
+#define BNX2_TDMA_CONFIG				0x00005c08
+#define BNX2_TDMA_CONFIG_ONE_DMA			 (1L<<0)
+#define BNX2_TDMA_CONFIG_ONE_RECORD			 (1L<<1)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN			 (0x3L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_0			 (0L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_1			 (1L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_2			 (2L<<2)
+#define BNX2_TDMA_CONFIG_NUM_DMA_CHAN_3			 (3L<<2)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ			 (0xfL<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_64			 (0L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_128			 (0x4L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_256			 (0x6L<<4)
+#define BNX2_TDMA_CONFIG_LIMIT_SZ_512			 (0x8L<<4)
+#define BNX2_TDMA_CONFIG_LINE_SZ			 (0xfL<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_64			 (0L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_128			 (4L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_256			 (6L<<8)
+#define BNX2_TDMA_CONFIG_LINE_SZ_512			 (8L<<8)
+#define BNX2_TDMA_CONFIG_ALIGN_ENA			 (1L<<15)
+#define BNX2_TDMA_CONFIG_CHK_L2_BD			 (1L<<16)
+#define BNX2_TDMA_CONFIG_CMPL_ENTRY			 (1L<<17)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP			 (1L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_3			 (0L<<19)
+#define BNX2_TDMA_CONFIG_OFIFO_CMP_2			 (1L<<19)
+#define BNX2_TDMA_CONFIG_FIFO_CMP			 (0xfL<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_XI			 (0x7L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_0_XI		 (0L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_4_XI		 (1L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_8_XI		 (2L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_16_XI		 (3L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_32_XI		 (4L<<20)
+#define BNX2_TDMA_CONFIG_IFIFO_DEPTH_64_XI		 (5L<<20)
+#define BNX2_TDMA_CONFIG_FIFO_CMP_EN_XI			 (1L<<23)
+#define BNX2_TDMA_CONFIG_BYTES_OST_XI			 (0x7L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_512_XI		 (0L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_1024_XI		 (1L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_2048_XI		 (2L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_4096_XI		 (3L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_8192_XI		 (4L<<24)
+#define BNX2_TDMA_CONFIG_BYTES_OST_16384_XI		 (5L<<24)
+#define BNX2_TDMA_CONFIG_HC_BYPASS_XI			 (1L<<27)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_XI			 (0x7L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_128_XI		 (0L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_256_XI		 (1L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_512_XI		 (2L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_1024_XI		 (3L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_2048_XI		 (4L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_4096_XI		 (5L<<28)
+#define BNX2_TDMA_CONFIG_LCL_MRRS_EN_XI			 (1L<<31)
+
+#define BNX2_TDMA_PAYLOAD_PROD				0x00005c0c
+#define BNX2_TDMA_PAYLOAD_PROD_VALUE			 (0x1fffL<<3)
+
+#define BNX2_TDMA_DBG_WATCHDOG				0x00005c10
+#define BNX2_TDMA_DBG_TRIGGER				0x00005c14
+#define BNX2_TDMA_DMAD_FSM				0x00005c80
+#define BNX2_TDMA_DMAD_FSM_BD_INVLD			 (1L<<0)
+#define BNX2_TDMA_DMAD_FSM_PUSH				 (0xfL<<4)
+#define BNX2_TDMA_DMAD_FSM_ARB_TBDC			 (0x3L<<8)
+#define BNX2_TDMA_DMAD_FSM_ARB_CTX			 (1L<<12)
+#define BNX2_TDMA_DMAD_FSM_DR_INTF			 (1L<<16)
+#define BNX2_TDMA_DMAD_FSM_DMAD				 (0x7L<<20)
+#define BNX2_TDMA_DMAD_FSM_BD				 (0xfL<<24)
+
+#define BNX2_TDMA_DMAD_STATUS				0x00005c84
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_PUSH_ENTRY		 (0x3L<<0)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_DMAD_ENTRY		 (0x3L<<4)
+#define BNX2_TDMA_DMAD_STATUS_RHOLD_BD_ENTRY		 (0x3L<<8)
+#define BNX2_TDMA_DMAD_STATUS_IFTQ_ENUM			 (0xfL<<12)
+
+#define BNX2_TDMA_DR_INTF_FSM				0x00005c88
+#define BNX2_TDMA_DR_INTF_FSM_L2_COMP			 (0x3L<<0)
+#define BNX2_TDMA_DR_INTF_FSM_TPATQ			 (0x7L<<4)
+#define BNX2_TDMA_DR_INTF_FSM_TPBUF			 (0x3L<<8)
+#define BNX2_TDMA_DR_INTF_FSM_DR_BUF			 (0x7L<<12)
+#define BNX2_TDMA_DR_INTF_FSM_DMAD			 (0x7L<<16)
+
+#define BNX2_TDMA_DR_INTF_STATUS			0x00005c8c
+#define BNX2_TDMA_DR_INTF_STATUS_HOLE_PHASE		 (0x7L<<0)
+#define BNX2_TDMA_DR_INTF_STATUS_DATA_AVAIL		 (0x3L<<4)
+#define BNX2_TDMA_DR_INTF_STATUS_SHIFT_ADDR		 (0x7L<<8)
+#define BNX2_TDMA_DR_INTF_STATUS_NXT_PNTR		 (0xfL<<12)
+#define BNX2_TDMA_DR_INTF_STATUS_BYTE_COUNT		 (0x7L<<16)
+
+#define BNX2_TDMA_PUSH_FSM				0x00005c90
+#define BNX2_TDMA_BD_IF_DEBUG				0x00005c94
+#define BNX2_TDMA_DMAD_IF_DEBUG				0x00005c98
+#define BNX2_TDMA_CTX_IF_DEBUG				0x00005c9c
+#define BNX2_TDMA_TPBUF_IF_DEBUG			0x00005ca0
+#define BNX2_TDMA_DR_IF_DEBUG				0x00005ca4
+#define BNX2_TDMA_TPATQ_IF_DEBUG			0x00005ca8
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM			0x00005cac
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_CALCULATED		 (0xffffL<<0)
+#define BNX2_TDMA_TDMA_ILOCK_CKSUM_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TDMA_TDMA_PCIE_CKSUM			0x00005cb0
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_CALCULATED		 (0xffffL<<0)
+#define BNX2_TDMA_TDMA_PCIE_CKSUM_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_TDMA_TDMAQ					0x00005fc0
+#define BNX2_TDMA_FTQ_CMD				0x00005ff8
+#define BNX2_TDMA_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TDMA_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TDMA_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TDMA_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TDMA_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TDMA_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TDMA_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TDMA_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TDMA_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TDMA_FTQ_CTL				0x00005ffc
+#define BNX2_TDMA_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TDMA_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TDMA_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TDMA_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TDMA_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+
+
+/*
+ *  hc_reg definition
+ *  offset: 0x6800
+ */
+#define BNX2_HC_COMMAND					0x00006800
+#define BNX2_HC_COMMAND_ENABLE				 (1L<<0)
+#define BNX2_HC_COMMAND_SKIP_ABORT			 (1L<<4)
+#define BNX2_HC_COMMAND_COAL_NOW			 (1L<<16)
+#define BNX2_HC_COMMAND_COAL_NOW_WO_INT			 (1L<<17)
+#define BNX2_HC_COMMAND_STATS_NOW			 (1L<<18)
+#define BNX2_HC_COMMAND_FORCE_INT			 (0x3L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_NULL			 (0L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_HIGH			 (1L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_LOW			 (2L<<19)
+#define BNX2_HC_COMMAND_FORCE_INT_FREE			 (3L<<19)
+#define BNX2_HC_COMMAND_CLR_STAT_NOW			 (1L<<21)
+#define BNX2_HC_COMMAND_MAIN_PWR_INT			 (1L<<22)
+#define BNX2_HC_COMMAND_COAL_ON_NEXT_EVENT		 (1L<<27)
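+
+/*
+ * Hedged usage note: a driver commonly forces an immediate coalescing
+ * pass without raising an interrupt by OR-ing the flag into its cached
+ * command word, e.g.
+ *
+ *	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+ *
+ * where bp->hc_cmd is assumed driver state, not defined in this header.
+ */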
+
+#define BNX2_HC_STATUS					0x00006804
+#define BNX2_HC_STATUS_MASTER_ABORT			 (1L<<0)
+#define BNX2_HC_STATUS_PARITY_ERROR_STATE		 (1L<<1)
+#define BNX2_HC_STATUS_PCI_CLK_CNT_STAT			 (1L<<16)
+#define BNX2_HC_STATUS_CORE_CLK_CNT_STAT		 (1L<<17)
+#define BNX2_HC_STATUS_NUM_STATUS_BLOCKS_STAT		 (1L<<18)
+#define BNX2_HC_STATUS_NUM_INT_GEN_STAT			 (1L<<19)
+#define BNX2_HC_STATUS_NUM_INT_MBOX_WR_STAT		 (1L<<20)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_HW_INTACK_STAT	 (1L<<23)
+#define BNX2_HC_STATUS_CORE_CLKS_TO_SW_INTACK_STAT	 (1L<<24)
+#define BNX2_HC_STATUS_CORE_CLKS_DURING_SW_INTACK_STAT	 (1L<<25)
+
+#define BNX2_HC_CONFIG					0x00006808
+#define BNX2_HC_CONFIG_COLLECT_STATS			 (1L<<0)
+#define BNX2_HC_CONFIG_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_CONFIG_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_CONFIG_COM_TMR_MODE			 (1L<<3)
+#define BNX2_HC_CONFIG_CMD_TMR_MODE			 (1L<<4)
+#define BNX2_HC_CONFIG_STATISTIC_PRIORITY		 (1L<<5)
+#define BNX2_HC_CONFIG_STATUS_PRIORITY			 (1L<<6)
+#define BNX2_HC_CONFIG_STAT_MEM_ADDR			 (0xffL<<8)
+#define BNX2_HC_CONFIG_PER_MODE				 (1L<<16)
+#define BNX2_HC_CONFIG_ONE_SHOT				 (1L<<17)
+#define BNX2_HC_CONFIG_USE_INT_PARAM			 (1L<<18)
+#define BNX2_HC_CONFIG_SET_MASK_AT_RD			 (1L<<19)
+#define BNX2_HC_CONFIG_PER_COLLECT_LIMIT		 (0xfL<<20)
+#define BNX2_HC_CONFIG_SB_ADDR_INC			 (0x7L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_64B			 (0L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_128B			 (1L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_256B			 (2L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_512B			 (3L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_1024B		 (4L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_2048B		 (5L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_4096B		 (6L<<24)
+#define BNX2_HC_CONFIG_SB_ADDR_INC_8192B		 (7L<<24)
+#define BNX2_HC_CONFIG_GEN_STAT_AVG_INTR		 (1L<<29)
+#define BNX2_HC_CONFIG_UNMASK_ALL			 (1L<<30)
+#define BNX2_HC_CONFIG_TX_SEL				 (1L<<31)
+
+#define BNX2_HC_ATTN_BITS_ENABLE			0x0000680c
+#define BNX2_HC_STATUS_ADDR_L				0x00006810
+#define BNX2_HC_STATUS_ADDR_H				0x00006814
+#define BNX2_HC_STATISTICS_ADDR_L			0x00006818
+#define BNX2_HC_STATISTICS_ADDR_H			0x0000681c
+#define BNX2_HC_TX_QUICK_CONS_TRIP			0x00006820
+#define BNX2_HC_TX_QUICK_CONS_TRIP_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_INT			 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP				0x00006824
+#define BNX2_HC_COMP_PROD_TRIP_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP			0x00006828
+#define BNX2_HC_RX_QUICK_CONS_TRIP_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS				0x0000682c
+#define BNX2_HC_RX_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_INT				 (0x3ffL<<16)
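+
+/*
+ * Hedged usage sketch: each tick register pairs a no-interrupt-pending
+ * threshold in the low half with an interrupt-pending threshold in the
+ * high half, so RX coalescing would be programmed along the lines of
+ *
+ *	REG_WR(bp, BNX2_HC_RX_TICKS, (rx_ticks_int << 16) | rx_ticks);
+ *
+ * with the two tick values taken from driver configuration (assumed
+ * names, not defined here).
+ */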
+
+#define BNX2_HC_TX_TICKS				0x00006830
+#define BNX2_HC_TX_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS				0x00006834
+#define BNX2_HC_COM_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS				0x00006838
+#define BNX2_HC_CMD_TICKS_VALUE				 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS				0x0000683c
+#define BNX2_HC_PERIODIC_TICKS_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_STAT_COLLECT_TICKS			0x00006840
+#define BNX2_HC_STAT_COLLECT_TICKS_HC_STAT_COLL_TICKS	 (0xffL<<4)
+
+#define BNX2_HC_STATS_TICKS				0x00006844
+#define BNX2_HC_STATS_TICKS_HC_STAT_TICKS		 (0xffffL<<8)
+
+#define BNX2_HC_STATS_INTERRUPT_STATUS			0x00006848
+#define BNX2_HC_STATS_INTERRUPT_STATUS_SB_STATUS	 (0x1ffL<<0)
+#define BNX2_HC_STATS_INTERRUPT_STATUS_INT_STATUS	 (0x1ffL<<16)
+
+#define BNX2_HC_STAT_MEM_DATA				0x0000684c
+#define BNX2_HC_STAT_GEN_SEL_0				0x00006850
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT0	 (0L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT1	 (1L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT2	 (2L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT3	 (3L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT4	 (4L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT5	 (5L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT6	 (6L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT7	 (7L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT8	 (8L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT9	 (9L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT10	 (10L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXP_STAT11	 (11L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT0	 (12L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT1	 (13L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT2	 (14L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT3	 (15L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT4	 (16L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT5	 (17L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT6	 (18L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXP_STAT7	 (19L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT0	 (20L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT1	 (21L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT2	 (22L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT3	 (23L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT4	 (24L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT5	 (25L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT6	 (26L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT7	 (27L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT8	 (28L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT9	 (29L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT10	 (30L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COM_STAT11	 (31L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT0	 (32L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT1	 (33L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT2	 (34L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPAT_STAT3	 (35L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT0	 (36L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT1	 (37L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT2	 (38L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT3	 (39L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT4	 (40L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT5	 (41L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT6	 (42L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CP_STAT7	 (43L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT0	 (44L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT1	 (45L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT2	 (46L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT3	 (47L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT4	 (48L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT5	 (49L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT6	 (50L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MCP_STAT7	 (51L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_PCI_CLK_CNT	 (52L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CORE_CLK_CNT	 (53L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS	 (54L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN	 (55L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR	 (56L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK	 (59L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK	 (60L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK	 (61L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_CMD_CNT	 (62L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCH_SLOT_CNT	 (63L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_CMD_CNT	 (64L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSCH_SLOT_CNT	 (65L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUPQ_VALID_CNT	 (66L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPQ_VALID_CNT	 (67L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RXPCQ_VALID_CNT	 (68L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PPQ_VALID_CNT	 (69L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PMQ_VALID_CNT	 (70L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PTQ_VALID_CNT	 (71L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMAQ_VALID_CNT	 (72L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TSCHQ_VALID_CNT	 (73L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDRQ_VALID_CNT	 (74L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TXPQ_VALID_CNT	 (75L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMAQ_VALID_CNT	 (76L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TPATQ_VALID_CNT	 (77L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TASQ_VALID_CNT	 (78L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CSQ_VALID_CNT	 (79L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CPQ_VALID_CNT	 (80L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMXQ_VALID_CNT	 (81L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMTQ_VALID_CNT	 (82L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_COMQ_VALID_CNT	 (83L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MGMQ_VALID_CNT	 (84L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_TRANSFERS_CNT	 (85L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_READ_DELAY_PCI_CLKS_CNT	 (86L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_TRANSFERS_CNT	 (87L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_DELAY_PCI_CLKS_CNT	 (88L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_READ_RETRY_AFTER_DATA_CNT	 (89L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_TRANSFERS_CNT	 (90L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_WRITE_DELAY_PCI_CLKS_CNT	 (91L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_TRANSFERS_CNT	 (92L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_DELAY_PCI_CLKS_CNT	 (93L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_DMAE_BIG_WRITE_RETRY_AFTER_DATA_CNT	 (94L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_WR_CNT64	 (95L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_RD_CNT64	 (96L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_ACC_STALL_CLKS	 (97L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_LOCK_STALL_CLKS	 (98L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS_STAT	 (99L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_CTX_ACCESS64_STAT	 (100L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MBQ_PCI_STALL_STAT	 (101L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_FTQ_ENTRY_CNT	 (102L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TBDR_BURST_CNT	 (103L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_FTQ_ENTRY_CNT	 (104L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TDMA_BURST_CNT	 (105L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_FTQ_ENTRY_CNT	 (106L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RDMA_BURST_CNT	 (107L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RLUP_MATCH_CNT	 (108L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_POLL_PASS_CNT	 (109L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR1_CNT	 (110L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR2_CNT	 (111L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR3_CNT	 (112L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR4_CNT	 (113L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_TMR_TMR5_CNT	 (114L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT0	 (115L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT1	 (116L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT2	 (117L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT3	 (118L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT4	 (119L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2P_STAT5	 (120L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC1_MISS	 (121L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_PROC2_MISS	 (122L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RBDC_BURST_CNT	 (127L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UMP_RX_FRAME_DROP_XI	 (52L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S0_XI	 (57L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S1_XI	 (58L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S2_XI	 (85L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S3_XI	 (86L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S4_XI	 (87L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S5_XI	 (88L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S6_XI	 (89L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S7_XI	 (90L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S8_XI	 (91L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S9_XI	 (92L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_UNUSED_S10_XI	 (93L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_MQ_IDB_OFLOW_XI	 (94L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_RD_CNT_XI	 (123L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_BLK_WR_CNT_XI	 (124L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_HITS_XI	 (125L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_CTX_MISSES_XI	 (126L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC1_XI	 (128L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC1_XI	 (129L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC1_XI	 (130L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC1_XI	 (131L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC1_XI	 (132L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC1_XI	 (133L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC2_XI	 (134L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC2_XI	 (135L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC2_XI	 (136L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC2_XI	 (137L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC2_XI	 (138L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC2_XI	 (139L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC3_XI	 (140L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC3_XI	 (141L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC3_XI	 (142L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC3_XI	 (143L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC3_XI	 (144L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC3_XI	 (145L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC4_XI	 (146L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC4_XI	 (147L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC4_XI	 (148L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC4_XI	 (149L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC4_XI	 (150L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC4_XI	 (151L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC5_XI	 (152L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC5_XI	 (153L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC5_XI	 (154L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC5_XI	 (155L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC5_XI	 (156L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC5_XI	 (157L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC6_XI	 (158L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC6_XI	 (159L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC6_XI	 (160L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC6_XI	 (161L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC6_XI	 (162L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC6_XI	 (163L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC7_XI	 (164L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC7_XI	 (165L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC7_XI	 (166L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC7_XI	 (167L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC7_XI	 (168L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC7_XI	 (169L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_STATUS_BLOCKS_VEC8_XI	 (170L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_GEN_VEC8_XI	 (171L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_NUM_INT_MBOX_WR_VEC8_XI	 (172L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_HW_INTACK_VEC8_XI	 (173L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_TO_SW_INTACK_VEC8_XI	 (174L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_HC_CORE_CLKS_DURING_SW_INTACK_VEC8_XI	 (175L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_CMD_CNT_XI	 (176L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCS_SLOT_CNT_XI	 (177L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_0_RV2PCSQ_VALID_CNT_XI	 (178L<<0)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_1_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_2_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_0_GEN_SEL_3_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_1				0x00006854
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_4_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_5_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_6_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_1_GEN_SEL_7_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_2				0x00006858
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_8_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_9_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_10_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_2_GEN_SEL_11_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_SEL_3				0x0000685c
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12		 (0x7fL<<0)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13		 (0x7fL<<8)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14		 (0x7fL<<16)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15		 (0x7fL<<24)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_12_XI		 (0xffL<<0)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_13_XI		 (0xffL<<8)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_14_XI		 (0xffL<<16)
+#define BNX2_HC_STAT_GEN_SEL_3_GEN_SEL_15_XI		 (0xffL<<24)
+
+#define BNX2_HC_STAT_GEN_STAT0				0x00006888
+#define BNX2_HC_STAT_GEN_STAT1				0x0000688c
+#define BNX2_HC_STAT_GEN_STAT2				0x00006890
+#define BNX2_HC_STAT_GEN_STAT3				0x00006894
+#define BNX2_HC_STAT_GEN_STAT4				0x00006898
+#define BNX2_HC_STAT_GEN_STAT5				0x0000689c
+#define BNX2_HC_STAT_GEN_STAT6				0x000068a0
+#define BNX2_HC_STAT_GEN_STAT7				0x000068a4
+#define BNX2_HC_STAT_GEN_STAT8				0x000068a8
+#define BNX2_HC_STAT_GEN_STAT9				0x000068ac
+#define BNX2_HC_STAT_GEN_STAT10				0x000068b0
+#define BNX2_HC_STAT_GEN_STAT11				0x000068b4
+#define BNX2_HC_STAT_GEN_STAT12				0x000068b8
+#define BNX2_HC_STAT_GEN_STAT13				0x000068bc
+#define BNX2_HC_STAT_GEN_STAT14				0x000068c0
+#define BNX2_HC_STAT_GEN_STAT15				0x000068c4
+#define BNX2_HC_STAT_GEN_STAT_AC0			0x000068c8
+#define BNX2_HC_STAT_GEN_STAT_AC1			0x000068cc
+#define BNX2_HC_STAT_GEN_STAT_AC2			0x000068d0
+#define BNX2_HC_STAT_GEN_STAT_AC3			0x000068d4
+#define BNX2_HC_STAT_GEN_STAT_AC4			0x000068d8
+#define BNX2_HC_STAT_GEN_STAT_AC5			0x000068dc
+#define BNX2_HC_STAT_GEN_STAT_AC6			0x000068e0
+#define BNX2_HC_STAT_GEN_STAT_AC7			0x000068e4
+#define BNX2_HC_STAT_GEN_STAT_AC8			0x000068e8
+#define BNX2_HC_STAT_GEN_STAT_AC9			0x000068ec
+#define BNX2_HC_STAT_GEN_STAT_AC10			0x000068f0
+#define BNX2_HC_STAT_GEN_STAT_AC11			0x000068f4
+#define BNX2_HC_STAT_GEN_STAT_AC12			0x000068f8
+#define BNX2_HC_STAT_GEN_STAT_AC13			0x000068fc
+#define BNX2_HC_STAT_GEN_STAT_AC14			0x00006900
+#define BNX2_HC_STAT_GEN_STAT_AC15			0x00006904
+#define BNX2_HC_STAT_GEN_STAT_AC			0x000068c8
+#define BNX2_HC_VIS					0x00006908
+#define BNX2_HC_VIS_STAT_BUILD_STATE			 (0xfL<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_IDLE		 (0L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_START		 (1L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_REQUEST		 (2L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE64		 (3L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE32		 (4L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_UPDATE_DONE	 (5L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_DMA		 (6L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_CONTROL	 (7L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_LOW		 (8L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_HIGH		 (9L<<0)
+#define BNX2_HC_VIS_STAT_BUILD_STATE_MSI_DATA		 (10L<<0)
+#define BNX2_HC_VIS_DMA_STAT_STATE			 (0xfL<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_IDLE			 (0L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_PARAM		 (1L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATUS_DMA		 (2L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP		 (3L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_COMP			 (4L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_PARAM	 (5L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_STATISTIC_DMA	 (6L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_1		 (7L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WRITE_COMP_2		 (8L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_WAIT			 (9L<<8)
+#define BNX2_HC_VIS_DMA_STAT_STATE_ABORT		 (15L<<8)
+#define BNX2_HC_VIS_DMA_MSI_STATE			 (0x7L<<12)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE		 (0x3L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_IDLE		 (0L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_COUNT	 (1L<<15)
+#define BNX2_HC_VIS_STATISTIC_DMA_EN_STATE_START	 (2L<<15)
+
+#define BNX2_HC_VIS_1					0x0000690c
+#define BNX2_HC_VIS_1_HW_INTACK_STATE			 (1L<<4)
+#define BNX2_HC_VIS_1_HW_INTACK_STATE_IDLE		 (0L<<4)
+#define BNX2_HC_VIS_1_HW_INTACK_STATE_COUNT		 (1L<<4)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE			 (1L<<5)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE_IDLE		 (0L<<5)
+#define BNX2_HC_VIS_1_SW_INTACK_STATE_COUNT		 (1L<<5)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE		 (1L<<6)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_IDLE	 (0L<<6)
+#define BNX2_HC_VIS_1_DURING_SW_INTACK_STATE_COUNT	 (1L<<6)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE		 (1L<<7)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_IDLE		 (0L<<7)
+#define BNX2_HC_VIS_1_MAILBOX_COUNT_STATE_COUNT		 (1L<<7)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE			 (0xfL<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_IDLE		 (0L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_DMA		 (1L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_UPDATE		 (2L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_ASSIGN		 (3L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_WAIT		 (4L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_UPDATE	 (5L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_ASSIGN	 (6L<<17)
+#define BNX2_HC_VIS_1_RAM_RD_ARB_STATE_REG_WAIT		 (7L<<17)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE			 (0x3L<<21)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_NORMAL		 (0L<<21)
+#define BNX2_HC_VIS_1_RAM_WR_ARB_STATE_CLEAR		 (1L<<21)
+#define BNX2_HC_VIS_1_INT_GEN_STATE			 (1L<<23)
+#define BNX2_HC_VIS_1_INT_GEN_STATE_IDLE		 (0L<<23)
+#define BNX2_HC_VIS_1_INT_GEN_STATE_INTERRUPT		 (1L<<23)
+#define BNX2_HC_VIS_1_STAT_CHAN_ID			 (0x7L<<24)
+#define BNX2_HC_VIS_1_INT_B				 (1L<<27)
+
+#define BNX2_HC_DEBUG_VECT_PEEK				0x00006910
+#define BNX2_HC_DEBUG_VECT_PEEK_1_VALUE			 (0x7ffL<<0)
+#define BNX2_HC_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_HC_DEBUG_VECT_PEEK_1_SEL			 (0xfL<<12)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_VALUE			 (0x7ffL<<16)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_HC_DEBUG_VECT_PEEK_2_SEL			 (0xfL<<28)
+
+#define BNX2_HC_COALESCE_NOW				0x00006914
+#define BNX2_HC_COALESCE_NOW_COAL_NOW			 (0x1ffL<<1)
+#define BNX2_HC_COALESCE_NOW_COAL_NOW_WO_INT		 (0x1ffL<<11)
+#define BNX2_HC_COALESCE_NOW_COAL_ON_NXT_EVENT		 (0x1ffL<<21)
+
+#define BNX2_HC_MSIX_BIT_VECTOR				0x00006918
+#define BNX2_HC_MSIX_BIT_VECTOR_VAL			 (0x1ffL<<0)
+
+#define BNX2_HC_SB_CONFIG_1				0x00006a00
+#define BNX2_HC_SB_CONFIG_1_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_1_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_1_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_1_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_1_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_1_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_1_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_1_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1			0x00006a04
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_1_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_1			0x00006a08
+#define BNX2_HC_COMP_PROD_TRIP_1_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_1_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1			0x00006a0c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_1_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_1				0x00006a10
+#define BNX2_HC_RX_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_1				0x00006a14
+#define BNX2_HC_TX_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_1				0x00006a18
+#define BNX2_HC_COM_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_1				0x00006a1c
+#define BNX2_HC_CMD_TICKS_1_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_1_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_1			0x00006a20
+#define BNX2_HC_PERIODIC_TICKS_1_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_1_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_2				0x00006a24
+#define BNX2_HC_SB_CONFIG_2_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_2_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_2_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_2_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_2_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_2_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_2_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_2_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2			0x00006a28
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_2_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_2			0x00006a2c
+#define BNX2_HC_COMP_PROD_TRIP_2_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_2_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2			0x00006a30
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_2_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_2				0x00006a34
+#define BNX2_HC_RX_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_2				0x00006a38
+#define BNX2_HC_TX_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_2				0x00006a3c
+#define BNX2_HC_COM_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_2				0x00006a40
+#define BNX2_HC_CMD_TICKS_2_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_2_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_2			0x00006a44
+#define BNX2_HC_PERIODIC_TICKS_2_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_2_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_3				0x00006a48
+#define BNX2_HC_SB_CONFIG_3_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_3_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_3_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_3_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_3_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_3_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_3_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_3_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3			0x00006a4c
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_3_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_3			0x00006a50
+#define BNX2_HC_COMP_PROD_TRIP_3_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_3_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3			0x00006a54
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_3_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_3				0x00006a58
+#define BNX2_HC_RX_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_3				0x00006a5c
+#define BNX2_HC_TX_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_3				0x00006a60
+#define BNX2_HC_COM_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_3				0x00006a64
+#define BNX2_HC_CMD_TICKS_3_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_3_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_3			0x00006a68
+#define BNX2_HC_PERIODIC_TICKS_3_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_3_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_4				0x00006a6c
+#define BNX2_HC_SB_CONFIG_4_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_4_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_4_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_4_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_4_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_4_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_4_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_4_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4			0x00006a70
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_4_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_4			0x00006a74
+#define BNX2_HC_COMP_PROD_TRIP_4_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_4_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4			0x00006a78
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_4_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_4				0x00006a7c
+#define BNX2_HC_RX_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_4				0x00006a80
+#define BNX2_HC_TX_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_4				0x00006a84
+#define BNX2_HC_COM_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_4				0x00006a88
+#define BNX2_HC_CMD_TICKS_4_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_4_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_4			0x00006a8c
+#define BNX2_HC_PERIODIC_TICKS_4_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_4_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_5				0x00006a90
+#define BNX2_HC_SB_CONFIG_5_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_5_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_5_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_5_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_5_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_5_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_5_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_5_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5			0x00006a94
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_5_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_5			0x00006a98
+#define BNX2_HC_COMP_PROD_TRIP_5_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_5_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5			0x00006a9c
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_5_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_5				0x00006aa0
+#define BNX2_HC_RX_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_5				0x00006aa4
+#define BNX2_HC_TX_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_5				0x00006aa8
+#define BNX2_HC_COM_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_5				0x00006aac
+#define BNX2_HC_CMD_TICKS_5_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_5_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_5			0x00006ab0
+#define BNX2_HC_PERIODIC_TICKS_5_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_5_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_6				0x00006ab4
+#define BNX2_HC_SB_CONFIG_6_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_6_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_6_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_6_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_6_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_6_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_6_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_6_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6			0x00006ab8
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_6_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_6			0x00006abc
+#define BNX2_HC_COMP_PROD_TRIP_6_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_6_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6			0x00006ac0
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_6_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_6				0x00006ac4
+#define BNX2_HC_RX_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_6				0x00006ac8
+#define BNX2_HC_TX_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_6				0x00006acc
+#define BNX2_HC_COM_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_6				0x00006ad0
+#define BNX2_HC_CMD_TICKS_6_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_6_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_6			0x00006ad4
+#define BNX2_HC_PERIODIC_TICKS_6_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_6_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_7				0x00006ad8
+#define BNX2_HC_SB_CONFIG_7_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_7_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_7_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_7_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_7_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_7_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_7_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_7_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7			0x00006adc
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_7_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_7			0x00006ae0
+#define BNX2_HC_COMP_PROD_TRIP_7_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_7_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7			0x00006ae4
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_7_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_7				0x00006ae8
+#define BNX2_HC_RX_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_7				0x00006aec
+#define BNX2_HC_TX_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_7				0x00006af0
+#define BNX2_HC_COM_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_7				0x00006af4
+#define BNX2_HC_CMD_TICKS_7_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_7_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_7			0x00006af8
+#define BNX2_HC_PERIODIC_TICKS_7_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_7_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_8				0x00006afc
+#define BNX2_HC_SB_CONFIG_8_RX_TMR_MODE			 (1L<<1)
+#define BNX2_HC_SB_CONFIG_8_TX_TMR_MODE			 (1L<<2)
+#define BNX2_HC_SB_CONFIG_8_COM_TMR_MODE		 (1L<<3)
+#define BNX2_HC_SB_CONFIG_8_CMD_TMR_MODE		 (1L<<4)
+#define BNX2_HC_SB_CONFIG_8_PER_MODE			 (1L<<16)
+#define BNX2_HC_SB_CONFIG_8_ONE_SHOT			 (1L<<17)
+#define BNX2_HC_SB_CONFIG_8_USE_INT_PARAM		 (1L<<18)
+#define BNX2_HC_SB_CONFIG_8_PER_COLLECT_LIMIT		 (0xfL<<20)
+
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8			0x00006b00
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_VALUE		 (0xffL<<0)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_8_INT		 (0xffL<<16)
+
+#define BNX2_HC_COMP_PROD_TRIP_8			0x00006b04
+#define BNX2_HC_COMP_PROD_TRIP_8_VALUE			 (0xffL<<0)
+#define BNX2_HC_COMP_PROD_TRIP_8_INT			 (0xffL<<16)
+
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8			0x00006b08
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_VALUE		 (0xffL<<0)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_8_INT		 (0xffL<<16)
+
+#define BNX2_HC_RX_TICKS_8				0x00006b0c
+#define BNX2_HC_RX_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_RX_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_TX_TICKS_8				0x00006b10
+#define BNX2_HC_TX_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_TX_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_COM_TICKS_8				0x00006b14
+#define BNX2_HC_COM_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_COM_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_CMD_TICKS_8				0x00006b18
+#define BNX2_HC_CMD_TICKS_8_VALUE			 (0x3ffL<<0)
+#define BNX2_HC_CMD_TICKS_8_INT				 (0x3ffL<<16)
+
+#define BNX2_HC_PERIODIC_TICKS_8			0x00006b1c
+#define BNX2_HC_PERIODIC_TICKS_8_HC_PERIODIC_TICKS	 (0xffffL<<0)
+#define BNX2_HC_PERIODIC_TICKS_8_HC_INT_PERIODIC_TICKS	 (0xffffL<<16)
+
+#define BNX2_HC_SB_CONFIG_SIZE	(BNX2_HC_SB_CONFIG_2 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_COMP_PROD_TRIP_OFF	(BNX2_HC_COMP_PROD_TRIP_1 -	\
+					 BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_COM_TICKS_OFF	(BNX2_HC_COM_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_CMD_TICKS_OFF	(BNX2_HC_CMD_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_TX_QUICK_CONS_TRIP_OFF	(BNX2_HC_TX_QUICK_CONS_TRIP_1 -	\
+					 BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_TX_TICKS_OFF	(BNX2_HC_TX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_RX_QUICK_CONS_TRIP_OFF	(BNX2_HC_RX_QUICK_CONS_TRIP_1 - \
+					 BNX2_HC_SB_CONFIG_1)
+#define BNX2_HC_RX_TICKS_OFF	(BNX2_HC_RX_TICKS_1 - BNX2_HC_SB_CONFIG_1)
+
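+/* Illustrative use of the _OFF offsets above (not part of the original
+ * layout): per-vector coalescing registers are addressed relative to their
+ * status block, e.g. for MSI-X vector n >= 1:
+ *
+ *	base = BNX2_HC_SB_CONFIG_1 + (n - 1) * BNX2_HC_SB_CONFIG_SIZE;
+ *	REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF, val);
+ *
+ * Sketch only; the actual indexing is done in the driver's init code.
+ */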
+
+/*
+ *  txp_reg definition
+ *  offset: 0x40000
+ */
+#define BNX2_TXP_CPU_MODE				0x00045000
+#define BNX2_TXP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_TXP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_TXP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_TXP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_TXP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_TXP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_TXP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_TXP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_TXP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_TXP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_TXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_TXP_CPU_STATE				0x00045004
+#define BNX2_TXP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_TXP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_TXP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_TXP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_TXP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_TXP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_TXP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_TXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_TXP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_TXP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_TXP_CPU_STATE_INTERRRUPT			 (1L<<12)
+#define BNX2_TXP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_TXP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_TXP_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_TXP_CPU_EVENT_MASK				0x00045008
+#define BNX2_TXP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_TXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_TXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_TXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_TXP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_TXP_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_TXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_TXP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_TXP_CPU_PROGRAM_COUNTER			0x0004501c
+#define BNX2_TXP_CPU_INSTRUCTION			0x00045020
+#define BNX2_TXP_CPU_DATA_ACCESS			0x00045024
+#define BNX2_TXP_CPU_INTERRUPT_ENABLE			0x00045028
+#define BNX2_TXP_CPU_INTERRUPT_VECTOR			0x0004502c
+#define BNX2_TXP_CPU_INTERRUPT_SAVED_PC			0x00045030
+#define BNX2_TXP_CPU_HW_BREAKPOINT			0x00045034
+#define BNX2_TXP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_TXP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK			0x00045038
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_TXP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR			0x00045048
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_TXP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_TXP_CPU_REG_FILE				0x00045200
+#define BNX2_TXP_TXPQ					0x000453c0
+#define BNX2_TXP_FTQ_CMD				0x000453f8
+#define BNX2_TXP_FTQ_CMD_OFFSET				 (0x3ffL<<0)
+#define BNX2_TXP_FTQ_CMD_WR_TOP				 (1L<<10)
+#define BNX2_TXP_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TXP_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TXP_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TXP_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TXP_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TXP_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TXP_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TXP_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TXP_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TXP_FTQ_CTL				0x000453fc
+#define BNX2_TXP_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TXP_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TXP_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TXP_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TXP_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_TXP_SCRATCH				0x00060000
+
+
+/*
+ *  tpat_reg definition
+ *  offset: 0x80000
+ */
+#define BNX2_TPAT_CPU_MODE				0x00085000
+#define BNX2_TPAT_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_TPAT_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_TPAT_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_TPAT_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_TPAT_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_TPAT_CPU_MODE_INTERRUPT_ENA		 (1L<<7)
+#define BNX2_TPAT_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_TPAT_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_TPAT_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_TPAT_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_TPAT_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_TPAT_CPU_STATE				0x00085004
+#define BNX2_TPAT_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_TPAT_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_TPAT_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_TPAT_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_TPAT_CPU_STATE_BAD_DATA_ADDR_HALTED	 (1L<<5)
+#define BNX2_TPAT_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_TPAT_CPU_STATE_ALIGN_HALTED		 (1L<<7)
+#define BNX2_TPAT_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_TPAT_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_TPAT_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_TPAT_CPU_STATE_INTERRRUPT			 (1L<<12)
+#define BNX2_TPAT_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_TPAT_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_TPAT_CPU_STATE_BLOCKED_READ		 (1L<<31)
+
+#define BNX2_TPAT_CPU_EVENT_MASK			0x00085008
+#define BNX2_TPAT_CPU_EVENT_MASK_BREAKPOINT_MASK	 (1L<<0)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_TPAT_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_TPAT_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_TPAT_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_TPAT_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_TPAT_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_TPAT_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_TPAT_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_TPAT_CPU_PROGRAM_COUNTER			0x0008501c
+#define BNX2_TPAT_CPU_INSTRUCTION			0x00085020
+#define BNX2_TPAT_CPU_DATA_ACCESS			0x00085024
+#define BNX2_TPAT_CPU_INTERRUPT_ENABLE			0x00085028
+#define BNX2_TPAT_CPU_INTERRUPT_VECTOR			0x0008502c
+#define BNX2_TPAT_CPU_INTERRUPT_SAVED_PC		0x00085030
+#define BNX2_TPAT_CPU_HW_BREAKPOINT			0x00085034
+#define BNX2_TPAT_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_TPAT_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK			0x00085038
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_TPAT_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR			0x00085048
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_JUMP	 (0L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_TPAT_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_TPAT_CPU_REG_FILE				0x00085200
+#define BNX2_TPAT_TPATQ					0x000853c0
+#define BNX2_TPAT_FTQ_CMD				0x000853f8
+#define BNX2_TPAT_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_TPAT_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_TPAT_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_TPAT_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_TPAT_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_TPAT_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_TPAT_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_TPAT_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_TPAT_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_TPAT_FTQ_CTL				0x000853fc
+#define BNX2_TPAT_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_TPAT_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_TPAT_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_TPAT_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_TPAT_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_TPAT_SCRATCH				0x000a0000
+
+
+/*
+ *  rxp_reg definition
+ *  offset: 0xc0000
+ */
+#define BNX2_RXP_CPU_MODE				0x000c5000
+#define BNX2_RXP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_RXP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_RXP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_RXP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_RXP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_RXP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_RXP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_RXP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_RXP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_RXP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_RXP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_RXP_CPU_STATE				0x000c5004
+#define BNX2_RXP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_RXP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_RXP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_RXP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_RXP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_RXP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_RXP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_RXP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_RXP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_RXP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_RXP_CPU_STATE_INTERRRUPT			 (1L<<12)
+#define BNX2_RXP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_RXP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_RXP_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_RXP_CPU_EVENT_MASK				0x000c5008
+#define BNX2_RXP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_RXP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_RXP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_RXP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_RXP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_RXP_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_RXP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_RXP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_RXP_CPU_PROGRAM_COUNTER			0x000c501c
+#define BNX2_RXP_CPU_INSTRUCTION			0x000c5020
+#define BNX2_RXP_CPU_DATA_ACCESS			0x000c5024
+#define BNX2_RXP_CPU_INTERRUPT_ENABLE			0x000c5028
+#define BNX2_RXP_CPU_INTERRUPT_VECTOR			0x000c502c
+#define BNX2_RXP_CPU_INTERRUPT_SAVED_PC			0x000c5030
+#define BNX2_RXP_CPU_HW_BREAKPOINT			0x000c5034
+#define BNX2_RXP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_RXP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK			0x000c5038
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_RXP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR			0x000c5048
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_RXP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_RXP_CPU_REG_FILE				0x000c5200
+#define BNX2_RXP_PFE_PFE_CTL				0x000c537c
+#define BNX2_RXP_PFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE			 (0xfL<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_0			 (0L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_1			 (1L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_2			 (2L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_3			 (3L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_4			 (4L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_5			 (5L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_6			 (6L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_7			 (7L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_8			 (8L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_9			 (9L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_RXP_PFE_PFE_CTL_PFE_COUNT			 (0xfL<<12)
+#define BNX2_RXP_PFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_RXP_RXPCQ					0x000c5380
+#define BNX2_RXP_CFTQ_CMD				0x000c53b8
+#define BNX2_RXP_CFTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RXP_CFTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RXP_CFTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RXP_CFTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RXP_CFTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RXP_CFTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RXP_CFTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_RXP_CFTQ_CMD_POP				 (1L<<30)
+#define BNX2_RXP_CFTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RXP_CFTQ_CTL				0x000c53bc
+#define BNX2_RXP_CFTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RXP_CFTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RXP_CFTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RXP_CFTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RXP_CFTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RXP_RXPQ					0x000c53c0
+#define BNX2_RXP_FTQ_CMD				0x000c53f8
+#define BNX2_RXP_FTQ_CMD_OFFSET				 (0x3ffL<<0)
+#define BNX2_RXP_FTQ_CMD_WR_TOP				 (1L<<10)
+#define BNX2_RXP_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_RXP_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_RXP_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_RXP_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_RXP_FTQ_CMD_ADD_INTERVEN			 (1L<<27)
+#define BNX2_RXP_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_RXP_FTQ_CMD_INTERVENE_CLR			 (1L<<29)
+#define BNX2_RXP_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_RXP_FTQ_CMD_BUSY				 (1L<<31)
+
+#define BNX2_RXP_FTQ_CTL				0x000c53fc
+#define BNX2_RXP_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_RXP_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_RXP_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_RXP_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_RXP_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_RXP_SCRATCH				0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD			 0x000e0024
+#define BNX2_RXP_SCRATCH_RSS_TBL_SZ			 0x000e0038
+#define BNX2_RXP_SCRATCH_RSS_TBL			 0x000e003c
+#define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES		 128
+
+
+/*
+ *  com_reg definition
+ *  offset: 0x100000
+ */
+#define BNX2_COM_CKSUM_ERROR_STATUS			0x00100000
+#define BNX2_COM_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_COM_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_COM_CPU_MODE				0x00105000
+#define BNX2_COM_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_COM_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_COM_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_COM_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_COM_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_COM_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_COM_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_COM_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_COM_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_COM_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_COM_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_COM_CPU_STATE				0x00105004
+#define BNX2_COM_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_COM_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_COM_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_COM_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_COM_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_COM_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_COM_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_COM_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_COM_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_COM_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_COM_CPU_STATE_INTERRRUPT			 (1L<<12)
+#define BNX2_COM_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_COM_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_COM_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_COM_CPU_EVENT_MASK				0x00105008
+#define BNX2_COM_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_COM_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_COM_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_COM_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_COM_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_COM_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_COM_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_COM_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_COM_CPU_PROGRAM_COUNTER			0x0010501c
+#define BNX2_COM_CPU_INSTRUCTION			0x00105020
+#define BNX2_COM_CPU_DATA_ACCESS			0x00105024
+#define BNX2_COM_CPU_INTERRUPT_ENABLE			0x00105028
+#define BNX2_COM_CPU_INTERRUPT_VECTOR			0x0010502c
+#define BNX2_COM_CPU_INTERRUPT_SAVED_PC			0x00105030
+#define BNX2_COM_CPU_HW_BREAKPOINT			0x00105034
+#define BNX2_COM_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_COM_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK			0x00105038
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_COM_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR			0x00105048
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_COM_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_COM_CPU_REG_FILE				0x00105200
+#define BNX2_COM_COMTQ_PFE_PFE_CTL			0x001052bc
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_INC_USAGE_CNT	 (1L<<0)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE		 (0xfL<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_COM_COMTQ_PFE_PFE_CTL_OFFSET		 (0x1ffL<<16)
+
+#define BNX2_COM_COMXQ					0x00105340
+#define BNX2_COM_COMXQ_FTQ_CMD				0x00105378
+#define BNX2_COM_COMXQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_COM_COMXQ_FTQ_CMD_SFT_RESET		 (1L<<25)
+#define BNX2_COM_COMXQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_COM_COMXQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_COM_COMXQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_COM_COMXQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_COM_COMXQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_COM_COMXQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_COM_COMXQ_FTQ_CTL				0x0010537c
+#define BNX2_COM_COMXQ_FTQ_CTL_INTERVENE		 (1L<<0)
+#define BNX2_COM_COMXQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_COM_COMXQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_COM_COMXQ_FTQ_CTL_MAX_DEPTH		 (0x3ffL<<12)
+#define BNX2_COM_COMXQ_FTQ_CTL_CUR_DEPTH		 (0x3ffL<<22)
+
+#define BNX2_COM_COMTQ					0x00105380
+#define BNX2_COM_COMTQ_FTQ_CMD				0x001053b8
+#define BNX2_COM_COMTQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_COM_COMTQ_FTQ_CMD_SFT_RESET		 (1L<<25)
+#define BNX2_COM_COMTQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_COM_COMTQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_COM_COMTQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_COM_COMTQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_COM_COMTQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_COM_COMTQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_COM_COMTQ_FTQ_CTL				0x001053bc
+#define BNX2_COM_COMTQ_FTQ_CTL_INTERVENE		 (1L<<0)
+#define BNX2_COM_COMTQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_COM_COMTQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_COM_COMTQ_FTQ_CTL_MAX_DEPTH		 (0x3ffL<<12)
+#define BNX2_COM_COMTQ_FTQ_CTL_CUR_DEPTH		 (0x3ffL<<22)
+
+#define BNX2_COM_COMQ					0x001053c0
+#define BNX2_COM_COMQ_FTQ_CMD				0x001053f8
+#define BNX2_COM_COMQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_COM_COMQ_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_COM_COMQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_COM_COMQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_COM_COMQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_COM_COMQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_COM_COMQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_COM_COMQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_COM_COMQ_FTQ_CTL				0x001053fc
+#define BNX2_COM_COMQ_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_COM_COMQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_COM_COMQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_COM_COMQ_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_COM_COMQ_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_COM_SCRATCH				0x00120000
+
+#define BNX2_FW_RX_LOW_LATENCY				 0x00120058
+#define BNX2_FW_RX_DROP_COUNT				 0x00120084
+
+
+/*
+ *  cp_reg definition
+ *  offset: 0x180000
+ */
+#define BNX2_CP_CKSUM_ERROR_STATUS			0x00180000
+#define BNX2_CP_CKSUM_ERROR_STATUS_CALCULATED		 (0xffffL<<0)
+#define BNX2_CP_CKSUM_ERROR_STATUS_EXPECTED		 (0xffffL<<16)
+
+#define BNX2_CP_CPU_MODE				0x00185000
+#define BNX2_CP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_CP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_CP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_CP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_CP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_CP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_CP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_CP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_CP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_CP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_CP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_CP_CPU_STATE				0x00185004
+#define BNX2_CP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_CP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_CP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_CP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_CP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_CP_CPU_STATE_BAD_PC_HALTED			 (1L<<6)
+#define BNX2_CP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_CP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_CP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_CP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_CP_CPU_STATE_INTERRRUPT			 (1L<<12)
+#define BNX2_CP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_CP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_CP_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_CP_CPU_EVENT_MASK				0x00185008
+#define BNX2_CP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_CP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_CP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_CP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_CP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_CP_CPU_EVENT_MASK_SOFT_HALTED_MASK		 (1L<<10)
+#define BNX2_CP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_CP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_CP_CPU_PROGRAM_COUNTER			0x0018501c
+#define BNX2_CP_CPU_INSTRUCTION				0x00185020
+#define BNX2_CP_CPU_DATA_ACCESS				0x00185024
+#define BNX2_CP_CPU_INTERRUPT_ENABLE			0x00185028
+#define BNX2_CP_CPU_INTERRUPT_VECTOR			0x0018502c
+#define BNX2_CP_CPU_INTERRUPT_SAVED_PC			0x00185030
+#define BNX2_CP_CPU_HW_BREAKPOINT			0x00185034
+#define BNX2_CP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_CP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK			0x00185038
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_CP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR			0x00185048
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_CP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_CP_CPU_REG_FILE				0x00185200
+#define BNX2_CP_CPQ_PFE_PFE_CTL				0x001853bc
+#define BNX2_CP_CPQ_PFE_PFE_CTL_INC_USAGE_CNT		 (1L<<0)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE		 (0xfL<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_0		 (0L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_1		 (1L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_2		 (2L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_3		 (3L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_4		 (4L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_5		 (5L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_6		 (6L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_7		 (7L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_8		 (8L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_9		 (9L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_10		 (10L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_11		 (11L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_12		 (12L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_13		 (13L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_14		 (14L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_SIZE_15		 (15L<<4)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_PFE_COUNT		 (0xfL<<12)
+#define BNX2_CP_CPQ_PFE_PFE_CTL_OFFSET			 (0x1ffL<<16)
+
+#define BNX2_CP_CPQ					0x001853c0
+#define BNX2_CP_CPQ_FTQ_CMD				0x001853f8
+#define BNX2_CP_CPQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_CP_CPQ_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_CP_CPQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_CP_CPQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_CP_CPQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_CP_CPQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_CP_CPQ_FTQ_CMD_POP				 (1L<<30)
+#define BNX2_CP_CPQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_CP_CPQ_FTQ_CTL				0x001853fc
+#define BNX2_CP_CPQ_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_CP_CPQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_CP_CPQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_CP_CPQ_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_CP_CPQ_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_CP_SCRATCH					0x001a0000
+
+#define BNX2_FW_MAX_ISCSI_CONN				 0x001a0080
+
+
+/*
+ *  mcp_reg definition
+ *  offset: 0x140000
+ */
+#define BNX2_MCP_MCP_CONTROL				0x00140080
+#define BNX2_MCP_MCP_CONTROL_SMBUS_SEL			 (1L<<30)
+#define BNX2_MCP_MCP_CONTROL_MCP_ISOLATE		 (1L<<31)
+
+#define BNX2_MCP_MCP_ATTENTION_STATUS			0x00140084
+#define BNX2_MCP_MCP_ATTENTION_STATUS_DRV_DOORBELL	 (1L<<29)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_WATCHDOG_TIMEOUT	 (1L<<30)
+#define BNX2_MCP_MCP_ATTENTION_STATUS_CPU_EVENT		 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL			0x00140088
+#define BNX2_MCP_MCP_HEARTBEAT_CONTROL_MCP_HEARTBEAT_ENABLE	 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS			0x0014008c
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_MCP_HEARTBEAT_PERIOD	 (0x7ffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_STATUS_VALID		 (1L<<31)
+
+#define BNX2_MCP_MCP_HEARTBEAT				0x00140090
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_COUNT	 (0x3fffffffL<<0)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_INC	 (1L<<30)
+#define BNX2_MCP_MCP_HEARTBEAT_MCP_HEARTBEAT_RESET	 (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_RESET				0x00140094
+#define BNX2_MCP_WATCHDOG_RESET_WATCHDOG_RESET		 (1L<<31)
+
+#define BNX2_MCP_WATCHDOG_CONTROL			0x00140098
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_TIMEOUT	 (0xfffffffL<<0)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ATTN		 (1L<<29)
+#define BNX2_MCP_WATCHDOG_CONTROL_MCP_RST_ENABLE	 (1L<<30)
+#define BNX2_MCP_WATCHDOG_CONTROL_WATCHDOG_ENABLE	 (1L<<31)
+
+#define BNX2_MCP_ACCESS_LOCK				0x0014009c
+#define BNX2_MCP_ACCESS_LOCK_LOCK			 (1L<<31)
+
+#define BNX2_MCP_TOE_ID					0x001400a0
+#define BNX2_MCP_TOE_ID_FUNCTION_ID			 (1L<<31)
+
+#define BNX2_MCP_MAILBOX_CFG				0x001400a4
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_OFFSET		 (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_MAILBOX_SIZE		 (0xfffL<<20)
+
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC			0x001400a8
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_OFFSET	 (0x3fffL<<0)
+#define BNX2_MCP_MAILBOX_CFG_OTHER_FUNC_MAILBOX_SIZE	 (0xfffL<<20)
+
+#define BNX2_MCP_MCP_DOORBELL				0x001400ac
+#define BNX2_MCP_MCP_DOORBELL_MCP_DOORBELL		 (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL			0x001400b0
+#define BNX2_MCP_DRIVER_DOORBELL_DRIVER_DOORBELL	 (1L<<31)
+
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC		0x001400b4
+#define BNX2_MCP_DRIVER_DOORBELL_OTHER_FUNC_DRIVER_DOORBELL	 (1L<<31)
+
+#define BNX2_MCP_CPU_MODE				0x00145000
+#define BNX2_MCP_CPU_MODE_LOCAL_RST			 (1L<<0)
+#define BNX2_MCP_CPU_MODE_STEP_ENA			 (1L<<1)
+#define BNX2_MCP_CPU_MODE_PAGE_0_DATA_ENA		 (1L<<2)
+#define BNX2_MCP_CPU_MODE_PAGE_0_INST_ENA		 (1L<<3)
+#define BNX2_MCP_CPU_MODE_MSG_BIT1			 (1L<<6)
+#define BNX2_MCP_CPU_MODE_INTERRUPT_ENA			 (1L<<7)
+#define BNX2_MCP_CPU_MODE_SOFT_HALT			 (1L<<10)
+#define BNX2_MCP_CPU_MODE_BAD_DATA_HALT_ENA		 (1L<<11)
+#define BNX2_MCP_CPU_MODE_BAD_INST_HALT_ENA		 (1L<<12)
+#define BNX2_MCP_CPU_MODE_FIO_ABORT_HALT_ENA		 (1L<<13)
+#define BNX2_MCP_CPU_MODE_SPAD_UNDERFLOW_HALT_ENA	 (1L<<15)
+
+#define BNX2_MCP_CPU_STATE				0x00145004
+#define BNX2_MCP_CPU_STATE_BREAKPOINT			 (1L<<0)
+#define BNX2_MCP_CPU_STATE_BAD_INST_HALTED		 (1L<<2)
+#define BNX2_MCP_CPU_STATE_PAGE_0_DATA_HALTED		 (1L<<3)
+#define BNX2_MCP_CPU_STATE_PAGE_0_INST_HALTED		 (1L<<4)
+#define BNX2_MCP_CPU_STATE_BAD_DATA_ADDR_HALTED		 (1L<<5)
+#define BNX2_MCP_CPU_STATE_BAD_PC_HALTED		 (1L<<6)
+#define BNX2_MCP_CPU_STATE_ALIGN_HALTED			 (1L<<7)
+#define BNX2_MCP_CPU_STATE_FIO_ABORT_HALTED		 (1L<<8)
+#define BNX2_MCP_CPU_STATE_SOFT_HALTED			 (1L<<10)
+#define BNX2_MCP_CPU_STATE_SPAD_UNDERFLOW		 (1L<<11)
+#define BNX2_MCP_CPU_STATE_INTERRRUPT			 (1L<<12)
+#define BNX2_MCP_CPU_STATE_DATA_ACCESS_STALL		 (1L<<14)
+#define BNX2_MCP_CPU_STATE_INST_FETCH_STALL		 (1L<<15)
+#define BNX2_MCP_CPU_STATE_BLOCKED_READ			 (1L<<31)
+
+#define BNX2_MCP_CPU_EVENT_MASK				0x00145008
+#define BNX2_MCP_CPU_EVENT_MASK_BREAKPOINT_MASK		 (1L<<0)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_INST_HALTED_MASK	 (1L<<2)
+#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_DATA_HALTED_MASK	 (1L<<3)
+#define BNX2_MCP_CPU_EVENT_MASK_PAGE_0_INST_HALTED_MASK	 (1L<<4)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_DATA_ADDR_HALTED_MASK	 (1L<<5)
+#define BNX2_MCP_CPU_EVENT_MASK_BAD_PC_HALTED_MASK	 (1L<<6)
+#define BNX2_MCP_CPU_EVENT_MASK_ALIGN_HALTED_MASK	 (1L<<7)
+#define BNX2_MCP_CPU_EVENT_MASK_FIO_ABORT_MASK		 (1L<<8)
+#define BNX2_MCP_CPU_EVENT_MASK_SOFT_HALTED_MASK	 (1L<<10)
+#define BNX2_MCP_CPU_EVENT_MASK_SPAD_UNDERFLOW_MASK	 (1L<<11)
+#define BNX2_MCP_CPU_EVENT_MASK_INTERRUPT_MASK		 (1L<<12)
+
+#define BNX2_MCP_CPU_PROGRAM_COUNTER			0x0014501c
+#define BNX2_MCP_CPU_INSTRUCTION			0x00145020
+#define BNX2_MCP_CPU_DATA_ACCESS			0x00145024
+#define BNX2_MCP_CPU_INTERRUPT_ENABLE			0x00145028
+#define BNX2_MCP_CPU_INTERRUPT_VECTOR			0x0014502c
+#define BNX2_MCP_CPU_INTERRUPT_SAVED_PC			0x00145030
+#define BNX2_MCP_CPU_HW_BREAKPOINT			0x00145034
+#define BNX2_MCP_CPU_HW_BREAKPOINT_DISABLE		 (1L<<0)
+#define BNX2_MCP_CPU_HW_BREAKPOINT_ADDRESS		 (0x3fffffffL<<2)
+
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK			0x00145038
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_VALUE		 (0x7ffL<<0)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_PEEK_EN		 (1L<<11)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_1_SEL		 (0xfL<<12)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_VALUE		 (0x7ffL<<16)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_PEEK_EN		 (1L<<27)
+#define BNX2_MCP_CPU_DEBUG_VECT_PEEK_2_SEL		 (0xfL<<28)
+
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR			0x00145048
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE		 (1L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_JUMP		 (0L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_TYPE_BRANCH	 (1L<<1)
+#define BNX2_MCP_CPU_LAST_BRANCH_ADDR_LBA		 (0x3fffffffL<<2)
+
+#define BNX2_MCP_CPU_REG_FILE				0x00145200
+#define BNX2_MCP_MCPQ					0x001453c0
+#define BNX2_MCP_MCPQ_FTQ_CMD				0x001453f8
+#define BNX2_MCP_MCPQ_FTQ_CMD_OFFSET			 (0x3ffL<<0)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP			 (1L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_0			 (0L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_WR_TOP_1			 (1L<<10)
+#define BNX2_MCP_MCPQ_FTQ_CMD_SFT_RESET			 (1L<<25)
+#define BNX2_MCP_MCPQ_FTQ_CMD_RD_DATA			 (1L<<26)
+#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_INTERVEN		 (1L<<27)
+#define BNX2_MCP_MCPQ_FTQ_CMD_ADD_DATA			 (1L<<28)
+#define BNX2_MCP_MCPQ_FTQ_CMD_INTERVENE_CLR		 (1L<<29)
+#define BNX2_MCP_MCPQ_FTQ_CMD_POP			 (1L<<30)
+#define BNX2_MCP_MCPQ_FTQ_CMD_BUSY			 (1L<<31)
+
+#define BNX2_MCP_MCPQ_FTQ_CTL				0x001453fc
+#define BNX2_MCP_MCPQ_FTQ_CTL_INTERVENE			 (1L<<0)
+#define BNX2_MCP_MCPQ_FTQ_CTL_OVERFLOW			 (1L<<1)
+#define BNX2_MCP_MCPQ_FTQ_CTL_FORCE_INTERVENE		 (1L<<2)
+#define BNX2_MCP_MCPQ_FTQ_CTL_MAX_DEPTH			 (0x3ffL<<12)
+#define BNX2_MCP_MCPQ_FTQ_CTL_CUR_DEPTH			 (0x3ffL<<22)
+
+#define BNX2_MCP_ROM					0x00150000
+#define BNX2_MCP_SCRATCH				0x00160000
+#define BNX2_MCP_STATE_P1				 0x0016f9c8
+#define BNX2_MCP_STATE_P0				 0x0016fdc8
+#define BNX2_MCP_STATE_P1_5708				 0x001699c8
+#define BNX2_MCP_STATE_P0_5708				 0x00169dc8
+
+#define BNX2_SHM_HDR_SIGNATURE				BNX2_MCP_SCRATCH
+#define BNX2_SHM_HDR_SIGNATURE_SIG_MASK			 0xffff0000
+#define BNX2_SHM_HDR_SIGNATURE_SIG			 0x53530000
+#define BNX2_SHM_HDR_SIGNATURE_VER_MASK			 0x000000ff
+#define BNX2_SHM_HDR_SIGNATURE_VER_ONE			 0x00000001
+
+#define BNX2_SHM_HDR_ADDR_0				(BNX2_MCP_SCRATCH + 4)
+#define BNX2_SHM_HDR_ADDR_1				(BNX2_MCP_SCRATCH + 8)
+
+
+#define NUM_MC_HASH_REGISTERS   8
+
+
+/* PHY_ID1: bits 31-16; PHY_ID2: bits 15-0.  */
+#define PHY_BCM5706_PHY_ID                          0x00206160
+
+#define PHY_ID(id)                                  ((id) & 0xfffffff0)
+#define PHY_REV_ID(id)                              ((id) & 0xf)
+
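+/* Illustrative decode (not part of the original file): the 32-bit id packs
+ * PHY_ID1 into bits 31-16 and PHY_ID2 into bits 15-0, with the low nibble
+ * carrying the silicon revision.  A hypothetical id of 0x00206165 gives
+ * PHY_ID() == 0x00206160 (matching PHY_BCM5706_PHY_ID) and
+ * PHY_REV_ID() == 0x5.
+ */
+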
+/* 5708 Serdes PHY registers */
+
+#define BCM5708S_BMCR_FORCE_2500		0x20
+
+#define BCM5708S_UP1				0xb
+
+#define BCM5708S_UP1_2G5			0x1
+
+#define BCM5708S_BLK_ADDR			0x1f
+
+#define BCM5708S_BLK_ADDR_DIG			0x0000
+#define BCM5708S_BLK_ADDR_DIG3			0x0002
+#define BCM5708S_BLK_ADDR_TX_MISC		0x0005
+
+/* Digital Block */
+#define BCM5708S_1000X_CTL1			0x10
+
+#define BCM5708S_1000X_CTL1_FIBER_MODE		0x0001
+#define BCM5708S_1000X_CTL1_AUTODET_EN		0x0010
+
+#define BCM5708S_1000X_CTL2			0x11
+
+#define BCM5708S_1000X_CTL2_PLLEL_DET_EN	0x0001
+
+#define BCM5708S_1000X_STAT1			0x14
+
+#define BCM5708S_1000X_STAT1_SGMII		0x0001
+#define BCM5708S_1000X_STAT1_LINK		0x0002
+#define BCM5708S_1000X_STAT1_FD			0x0004
+#define BCM5708S_1000X_STAT1_SPEED_MASK		0x0018
+#define BCM5708S_1000X_STAT1_SPEED_10		0x0000
+#define BCM5708S_1000X_STAT1_SPEED_100		0x0008
+#define BCM5708S_1000X_STAT1_SPEED_1G		0x0010
+#define BCM5708S_1000X_STAT1_SPEED_2G5		0x0018
+#define BCM5708S_1000X_STAT1_TX_PAUSE		0x0020
+#define BCM5708S_1000X_STAT1_RX_PAUSE		0x0040
+
+/* Digital3 Block */
+#define BCM5708S_DIG_3_0			0x10
+
+#define BCM5708S_DIG_3_0_USE_IEEE		0x0001
+
+/* Tx/Misc Block */
+#define BCM5708S_TX_ACTL1			0x15
+
+#define BCM5708S_TX_ACTL1_DRIVER_VCM		0x30
+
+#define BCM5708S_TX_ACTL3			0x17
+
+#define MII_BNX2_DSP_RW_PORT			0x15
+#define MII_BNX2_DSP_ADDRESS			0x17
+#define MII_BNX2_DSP_EXPAND_REG			 0x0f00
+#define MII_EXPAND_REG1				  (MII_BNX2_DSP_EXPAND_REG | 1)
+#define MII_EXPAND_REG1_RUDI_C			   0x20
+#define MII_EXPAND_SERDES_CTL			  (MII_BNX2_DSP_EXPAND_REG | 3)
+
+#define MII_BNX2_MISC_SHADOW			0x1c
+#define MISC_SHDW_AN_DBG			 0x6800
+#define MISC_SHDW_AN_DBG_NOSYNC			  0x0002
+#define MISC_SHDW_AN_DBG_RUDI_INVALID		  0x0100
+#define MISC_SHDW_MODE_CTL			 0x7c00
+#define MISC_SHDW_MODE_CTL_SIG_DET		  0x0010
+
+#define MII_BNX2_BLK_ADDR			0x1f
+#define MII_BNX2_BLK_ADDR_IEEE0			 0x0000
+#define MII_BNX2_BLK_ADDR_GP_STATUS		 0x8120
+#define MII_BNX2_GP_TOP_AN_STATUS1		  0x1b
+#define MII_BNX2_GP_TOP_AN_SPEED_MSK		   0x3f00
+#define MII_BNX2_GP_TOP_AN_SPEED_10		   0x0000
+#define MII_BNX2_GP_TOP_AN_SPEED_100		   0x0100
+#define MII_BNX2_GP_TOP_AN_SPEED_1G		   0x0200
+#define MII_BNX2_GP_TOP_AN_SPEED_2_5G		   0x0300
+#define MII_BNX2_GP_TOP_AN_SPEED_1GKV		   0x0d00
+#define MII_BNX2_GP_TOP_AN_FD			   0x8
+#define MII_BNX2_BLK_ADDR_SERDES_DIG		 0x8300
+#define MII_BNX2_SERDES_DIG_1000XCTL1		  0x10
+#define MII_BNX2_SD_1000XCTL1_FIBER		   0x01
+#define MII_BNX2_SD_1000XCTL1_AUTODET		   0x10
+#define MII_BNX2_SERDES_DIG_MISC1		  0x18
+#define MII_BNX2_SD_MISC1_FORCE_MSK		   0xf
+#define MII_BNX2_SD_MISC1_FORCE_2_5G		   0x0
+#define MII_BNX2_SD_MISC1_FORCE			   0x10
+#define MII_BNX2_BLK_ADDR_OVER1G		 0x8320
+#define MII_BNX2_OVER1G_UP1			  0x19
+#define MII_BNX2_BLK_ADDR_BAM_NXTPG		 0x8350
+#define MII_BNX2_BAM_NXTPG_CTL			  0x10
+#define MII_BNX2_NXTPG_CTL_BAM			   0x1
+#define MII_BNX2_NXTPG_CTL_T2			   0x2
+#define MII_BNX2_BLK_ADDR_CL73_USERB0		 0x8370
+#define MII_BNX2_CL73_BAM_CTL1			  0x12
+#define MII_BNX2_CL73_BAM_EN			   0x8000
+#define MII_BNX2_CL73_BAM_STA_MGR_EN		   0x4000
+#define MII_BNX2_CL73_BAM_NP_AFT_BP_EN		   0x2000
+#define MII_BNX2_BLK_ADDR_AER			 0xffd0
+#define MII_BNX2_AER_AER			  0x1e
+#define MII_BNX2_AER_AER_AN_MMD			   0x3800
+#define MII_BNX2_BLK_ADDR_COMBO_IEEEB0		 0xffe0
+
+#define MIN_ETHERNET_PACKET_SIZE	60
+#define MAX_ETHERNET_PACKET_SIZE	1514
+#define MAX_ETHERNET_JUMBO_PACKET_SIZE	9014
+
+#define BNX2_RX_COPY_THRESH		128
+
+#define BNX2_MISC_ENABLE_DEFAULT	0x17ffffff
+
+#define BNX2_START_UNICAST_ADDRESS_INDEX	4
+#define BNX2_END_UNICAST_ADDRESS_INDEX		7
+#define BNX2_MAX_UNICAST_ADDRESSES	(BNX2_END_UNICAST_ADDRESS_INDEX - \
+					 BNX2_START_UNICAST_ADDRESS_INDEX + 1)
+
+#define DMA_READ_CHANS	5
+#define DMA_WRITE_CHANS	3
+
+/* Use CPU native page size up to 16K for the ring sizes.  */
+#if (PAGE_SHIFT > 14)
+#define BCM_PAGE_BITS	14
+#else
+#define BCM_PAGE_BITS	PAGE_SHIFT
+#endif
+#define BCM_PAGE_SIZE	(1 << BCM_PAGE_BITS)
+
+#define TX_DESC_CNT  (BCM_PAGE_SIZE / sizeof(struct tx_bd))
+#define MAX_TX_DESC_CNT (TX_DESC_CNT - 1)
+
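+/* Worked example (illustrative): with 4K pages, BCM_PAGE_BITS == PAGE_SHIFT
+ * == 12 and BCM_PAGE_SIZE == 4096; assuming the 16-byte tx_bd defined
+ * earlier in this file, TX_DESC_CNT == 4096 / 16 == 256 and
+ * MAX_TX_DESC_CNT == 255.  On 64K-page systems the #if above caps the ring
+ * page size at 16K.
+ */
+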
+#define MAX_RX_RINGS	8
+#define MAX_RX_PG_RINGS	32
+#define RX_DESC_CNT  (BCM_PAGE_SIZE / sizeof(struct rx_bd))
+#define MAX_RX_DESC_CNT (RX_DESC_CNT - 1)
+#define MAX_TOTAL_RX_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_RINGS)
+#define MAX_TOTAL_RX_PG_DESC_CNT (MAX_RX_DESC_CNT * MAX_RX_PG_RINGS)
+
+#define NEXT_TX_BD(x) ((((x) & (MAX_TX_DESC_CNT - 1)) ==		\
+		(MAX_TX_DESC_CNT - 1)) ?				\
+	(x) + 2 : (x) + 1)
+
+#define TX_RING_IDX(x) ((x) & MAX_TX_DESC_CNT)
+
+#define NEXT_RX_BD(x) ((((x) & (MAX_RX_DESC_CNT - 1)) ==		\
+		(MAX_RX_DESC_CNT - 1)) ?				\
+	(x) + 2 : (x) + 1)
+
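+/* The last descriptor in each ring page is reserved as a chain pointer to
+ * the next page, so the NEXT_*_BD() macros skip it: with 256 descriptors
+ * per 4K page, advancing from index 254 yields 256, not 255.  (Explanatory
+ * note, assuming the chained-page ring layout used by this chip family.)
+ */
+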
+#define RX_RING_IDX(x) ((x) & bp->rx_max_ring_idx)
+#define RX_PG_RING_IDX(x) ((x) & bp->rx_max_pg_ring_idx)
+
+#define RX_RING(x) (((x) & ~MAX_RX_DESC_CNT) >> (BCM_PAGE_BITS - 4))
+#define RX_IDX(x) ((x) & MAX_RX_DESC_CNT)
+
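+/* Illustrative decomposition: a global rx index splits into a page number
+ * and a within-page offset.  With 4K pages (256 rx_bds per page),
+ * RX_RING(600) == (600 & ~255) >> 8 == 2 and RX_IDX(600) == 88.
+ */
+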
+/* Context size. */
+#define CTX_SHIFT                   7
+#define CTX_SIZE                    (1 << CTX_SHIFT)
+#define CTX_MASK                    (CTX_SIZE - 1)
+#define GET_CID_ADDR(_cid)          ((_cid) << CTX_SHIFT)
+#define GET_CID(_cid_addr)          ((_cid_addr) >> CTX_SHIFT)
+
+#define PHY_CTX_SHIFT               6
+#define PHY_CTX_SIZE                (1 << PHY_CTX_SHIFT)
+#define PHY_CTX_MASK                (PHY_CTX_SIZE - 1)
+#define GET_PCID_ADDR(_pcid)        ((_pcid) << PHY_CTX_SHIFT)
+#define GET_PCID(_pcid_addr)        ((_pcid_addr) >> PHY_CTX_SHIFT)
+
+#define MB_KERNEL_CTX_SHIFT         8
+#define MB_KERNEL_CTX_SIZE          (1 << MB_KERNEL_CTX_SHIFT)
+#define MB_KERNEL_CTX_MASK          (MB_KERNEL_CTX_SIZE - 1)
+#define MB_GET_CID_ADDR(_cid)       (0x10000 + ((_cid) << MB_KERNEL_CTX_SHIFT))
+
+#define MAX_CID_CNT                 0x4000
+#define MAX_CID_ADDR                (GET_CID_ADDR(MAX_CID_CNT))
+#define INVALID_CID_ADDR            0xffffffff
+
+#define TX_CID		16
+#define TX_TSS_CID	32
+#define RX_CID		0
+#define RX_RSS_CID	4
+#define RX_MAX_RSS_RINGS	7
+#define RX_MAX_RINGS		(RX_MAX_RSS_RINGS + 1)
+#define TX_MAX_TSS_RINGS	7
+#define TX_MAX_RINGS		(TX_MAX_TSS_RINGS + 1)
+
+#define MB_TX_CID_ADDR	MB_GET_CID_ADDR(TX_CID)
+#define MB_RX_CID_ADDR	MB_GET_CID_ADDR(RX_CID)
+
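+/* Worked example (illustrative): each context is CTX_SIZE (128) bytes, so
+ * GET_CID_ADDR(TX_CID) == 16 << 7 == 0x800.  Mailbox contexts are
+ * MB_KERNEL_CTX_SIZE (256) bytes starting at 0x10000, so MB_TX_CID_ADDR ==
+ * 0x10000 + (16 << 8) == 0x11000 and MB_RX_CID_ADDR == 0x10000.
+ */
+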
+struct sw_bd {
+	struct sk_buff		*skb;
+	struct l2_fhdr		*desc;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_pg {
+	struct page		*page;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_tx_bd {
+	struct sk_buff		*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	unsigned short		is_gso;
+	unsigned short		nr_frags;
+};
+
+#define SW_RXBD_RING_SIZE (sizeof(struct sw_bd) * RX_DESC_CNT)
+#define SW_RXPG_RING_SIZE (sizeof(struct sw_pg) * RX_DESC_CNT)
+#define RXBD_RING_SIZE (sizeof(struct rx_bd) * RX_DESC_CNT)
+#define SW_TXBD_RING_SIZE (sizeof(struct sw_tx_bd) * TX_DESC_CNT)
+#define TXBD_RING_SIZE (sizeof(struct tx_bd) * TX_DESC_CNT)
+
+/* Buffered flash (Atmel: AT45DB011B) specific information */
+#define SEEPROM_PAGE_BITS			2
+#define SEEPROM_PHY_PAGE_SIZE			(1 << SEEPROM_PAGE_BITS)
+#define SEEPROM_BYTE_ADDR_MASK			(SEEPROM_PHY_PAGE_SIZE-1)
+#define SEEPROM_PAGE_SIZE			4
+#define SEEPROM_TOTAL_SIZE			65536
+
+#define BUFFERED_FLASH_PAGE_BITS		9
+#define BUFFERED_FLASH_PHY_PAGE_SIZE		(1 << BUFFERED_FLASH_PAGE_BITS)
+#define BUFFERED_FLASH_BYTE_ADDR_MASK		(BUFFERED_FLASH_PHY_PAGE_SIZE-1)
+#define BUFFERED_FLASH_PAGE_SIZE		264
+#define BUFFERED_FLASH_TOTAL_SIZE		0x21000
+
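+/* The Atmel part's 264-byte pages are not a power of two, which is why a
+ * BNX2_NV_TRANSLATE-style mapping from linear offsets to physical addresses
+ * is needed; roughly (sketch only, the real logic lives in the NVRAM code):
+ *
+ *	phys = ((offset / page_size) << page_bits) + (offset % page_size);
+ *
+ * e.g. linear offset 1329 -> page 5, byte 9 -> (5 << 9) + 9 == 0xa09.
+ */
+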
+#define SAIFUN_FLASH_PAGE_BITS			8
+#define SAIFUN_FLASH_PHY_PAGE_SIZE		(1 << SAIFUN_FLASH_PAGE_BITS)
+#define SAIFUN_FLASH_BYTE_ADDR_MASK		(SAIFUN_FLASH_PHY_PAGE_SIZE-1)
+#define SAIFUN_FLASH_PAGE_SIZE			256
+#define SAIFUN_FLASH_BASE_TOTAL_SIZE		65536
+
+#define ST_MICRO_FLASH_PAGE_BITS		8
+#define ST_MICRO_FLASH_PHY_PAGE_SIZE		(1 << ST_MICRO_FLASH_PAGE_BITS)
+#define ST_MICRO_FLASH_BYTE_ADDR_MASK		(ST_MICRO_FLASH_PHY_PAGE_SIZE-1)
+#define ST_MICRO_FLASH_PAGE_SIZE		256
+#define ST_MICRO_FLASH_BASE_TOTAL_SIZE		65536
+
+#define BCM5709_FLASH_PAGE_BITS			8
+#define BCM5709_FLASH_PHY_PAGE_SIZE		(1 << BCM5709_FLASH_PAGE_BITS)
+#define BCM5709_FLASH_BYTE_ADDR_MASK		(BCM5709_FLASH_PHY_PAGE_SIZE-1)
+#define BCM5709_FLASH_PAGE_SIZE			256
+
+#define NVRAM_TIMEOUT_COUNT			30000
+
+
+#define FLASH_STRAP_MASK			(BNX2_NVM_CFG1_FLASH_MODE   | \
+						 BNX2_NVM_CFG1_BUFFER_MODE  | \
+						 BNX2_NVM_CFG1_PROTECT_MODE | \
+						 BNX2_NVM_CFG1_FLASH_SIZE)
+
+#define FLASH_BACKUP_STRAP_MASK			(0xf << 26)
+
+struct flash_spec {
+	u32 strapping;
+	u32 config1;
+	u32 config2;
+	u32 config3;
+	u32 write1;
+	u32 flags;
+#define BNX2_NV_BUFFERED	0x00000001
+#define BNX2_NV_TRANSLATE	0x00000002
+#define BNX2_NV_WREN		0x00000004
+	u32 page_bits;
+	u32 page_size;
+	u32 addr_mask;
+	u32 total_size;
+	u8  *name;
+};
+
+#define BNX2_MAX_MSIX_HW_VEC	9
+#define BNX2_MAX_MSIX_VEC	9
+#ifdef BCM_CNIC
+#define BNX2_MIN_MSIX_VEC	2
+#else
+#define BNX2_MIN_MSIX_VEC	1
+#endif
+
+
+struct bnx2_irq {
+	irq_handler_t	handler;
+	unsigned int	vector;
+	u8		requested;
+	char		name[IFNAMSIZ + 2];
+};
+
+struct bnx2_tx_ring_info {
+	u32			tx_prod_bseq;
+	u16			tx_prod;
+	u32			tx_bidx_addr;
+	u32			tx_bseq_addr;
+
+	struct tx_bd		*tx_desc_ring;
+	struct sw_tx_bd		*tx_buf_ring;
+
+	u16			tx_cons;
+	u16			hw_tx_cons;
+
+	dma_addr_t		tx_desc_mapping;
+};
+
+struct bnx2_rx_ring_info {
+	u32			rx_prod_bseq;
+	u16			rx_prod;
+	u16			rx_cons;
+
+	u32			rx_bidx_addr;
+	u32			rx_bseq_addr;
+	u32			rx_pg_bidx_addr;
+
+	u16			rx_pg_prod;
+	u16			rx_pg_cons;
+
+	struct sw_bd		*rx_buf_ring;
+	struct rx_bd		*rx_desc_ring[MAX_RX_RINGS];
+	struct sw_pg		*rx_pg_ring;
+	struct rx_bd		*rx_pg_desc_ring[MAX_RX_PG_RINGS];
+
+	dma_addr_t		rx_desc_mapping[MAX_RX_RINGS];
+	dma_addr_t		rx_pg_desc_mapping[MAX_RX_PG_RINGS];
+};
+
+struct bnx2_napi {
+	struct napi_struct	napi		____cacheline_aligned;
+	struct bnx2		*bp;
+	union {
+		struct status_block		*msi;
+		struct status_block_msix	*msix;
+	} status_blk;
+	u16			*hw_tx_cons_ptr;
+	u16			*hw_rx_cons_ptr;
+	u32			last_status_idx;
+	u32			int_num;
+
+#ifdef BCM_CNIC
+	u32			cnic_tag;
+	int			cnic_present;
+#endif
+
+	struct bnx2_rx_ring_info	rx_ring;
+	struct bnx2_tx_ring_info	tx_ring;
+};
+
+struct bnx2 {
+	/* Fields used in the tx and intr/napi performance paths are grouped
+	 * together in the beginning of the structure.
+	 */
+	void __iomem		*regview;
+
+	struct net_device	*dev;
+	struct pci_dev		*pdev;
+
+	atomic_t		intr_sem;
+
+	u32			flags;
+#define BNX2_FLAG_PCIX			0x00000001
+#define BNX2_FLAG_PCI_32BIT		0x00000002
+#define BNX2_FLAG_MSIX_CAP		0x00000004
+#define BNX2_FLAG_NO_WOL		0x00000008
+#define BNX2_FLAG_USING_MSI		0x00000020
+#define BNX2_FLAG_ASF_ENABLE		0x00000040
+#define BNX2_FLAG_MSI_CAP		0x00000080
+#define BNX2_FLAG_ONE_SHOT_MSI		0x00000100
+#define BNX2_FLAG_PCIE			0x00000200
+#define BNX2_FLAG_USING_MSIX		0x00000400
+#define BNX2_FLAG_USING_MSI_OR_MSIX	(BNX2_FLAG_USING_MSI | \
+					 BNX2_FLAG_USING_MSIX)
+#define BNX2_FLAG_JUMBO_BROKEN		0x00000800
+#define BNX2_FLAG_CAN_KEEP_VLAN		0x00001000
+#define BNX2_FLAG_BROKEN_STATS		0x00002000
+#define BNX2_FLAG_AER_ENABLED		0x00004000
+
+	struct bnx2_napi	bnx2_napi[BNX2_MAX_MSIX_VEC];
+
+	u32			rx_buf_use_size;	/* usable size */
+	u32			rx_buf_size;		/* with alignment */
+	u32			rx_copy_thresh;
+	u32			rx_jumbo_thresh;
+	u32			rx_max_ring_idx;
+	u32			rx_max_pg_ring_idx;
+
+	/* TX constants */
+	int		tx_ring_size;
+	u32		tx_wake_thresh;
+
+#ifdef BCM_CNIC
+	struct cnic_ops	__rcu	*cnic_ops;
+	void			*cnic_data;
+#endif
+
+	/* End of fields used in the performance code paths. */
+
+	unsigned int		current_interval;
+#define BNX2_TIMER_INTERVAL		HZ
+#define BNX2_SERDES_AN_TIMEOUT		(HZ / 3)
+#define BNX2_SERDES_FORCED_TIMEOUT	(HZ / 10)
+
+	struct timer_list	timer;
+	struct work_struct	reset_task;
+
+	/* Used to synchronize phy accesses. */
+	spinlock_t		phy_lock;
+	spinlock_t		indirect_lock;
+
+	u32			phy_flags;
+#define BNX2_PHY_FLAG_SERDES			0x00000001
+#define BNX2_PHY_FLAG_CRC_FIX			0x00000002
+#define BNX2_PHY_FLAG_PARALLEL_DETECT		0x00000004
+#define BNX2_PHY_FLAG_2_5G_CAPABLE		0x00000008
+#define BNX2_PHY_FLAG_INT_MODE_MASK		0x00000300
+#define BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING	0x00000100
+#define BNX2_PHY_FLAG_INT_MODE_LINK_READY	0x00000200
+#define BNX2_PHY_FLAG_DIS_EARLY_DAC		0x00000400
+#define BNX2_PHY_FLAG_REMOTE_PHY_CAP		0x00000800
+#define BNX2_PHY_FLAG_FORCED_DOWN		0x00001000
+#define BNX2_PHY_FLAG_NO_PARALLEL		0x00002000
+
+	u32			mii_bmcr;
+	u32			mii_bmsr;
+	u32			mii_bmsr1;
+	u32			mii_adv;
+	u32			mii_lpa;
+	u32			mii_up1;
+
+	u32			chip_id;
+	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_NUM(bp)			(((bp)->chip_id) & 0xffff0000)
+#define CHIP_NUM_5706			0x57060000
+#define CHIP_NUM_5708			0x57080000
+#define CHIP_NUM_5709			0x57090000
+
+#define CHIP_REV(bp)			(((bp)->chip_id) & 0x0000f000)
+#define CHIP_REV_Ax			0x00000000
+#define CHIP_REV_Bx			0x00001000
+#define CHIP_REV_Cx			0x00002000
+
+#define CHIP_METAL(bp)			(((bp)->chip_id) & 0x00000ff0)
+#define CHIP_BONDING(bp)		(((bp)->chip_id) & 0x0000000f)
+
+#define CHIP_ID(bp)			(((bp)->chip_id) & 0xfffffff0)
+#define CHIP_ID_5706_A0			0x57060000
+#define CHIP_ID_5706_A1			0x57060010
+#define CHIP_ID_5706_A2			0x57060020
+#define CHIP_ID_5708_A0			0x57080000
+#define CHIP_ID_5708_B0			0x57081000
+#define CHIP_ID_5708_B1			0x57081010
+#define CHIP_ID_5709_A0			0x57090000
+#define CHIP_ID_5709_A1			0x57090010
+
+#define CHIP_BOND_ID(bp)		(((bp)->chip_id) & 0xf)
+
+/* A serdes chip will have the first bit of the bond id set. */
+#define CHIP_BOND_ID_SERDES_BIT		0x01
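+
+/* For example, a (hypothetical) chip_id of 0x57091021 decodes as
+ * CHIP_NUM = 0x57090000 (5709), CHIP_REV = 0x1000 (Bx),
+ * CHIP_METAL = 0x020 and CHIP_BONDING = 0x1 (serdes bit set),
+ * giving CHIP_ID = 0x57091020.
+ */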
+
+	u32			phy_addr;
+	u32			phy_id;
+
+	u16			bus_speed_mhz;
+	u8			wol;
+
+	u8			pad;
+
+	u16			fw_wr_seq;
+	u16			fw_drv_pulse_wr_seq;
+
+	int			rx_max_ring;
+	int			rx_ring_size;
+
+	int			rx_max_pg_ring;
+	int			rx_pg_ring_size;
+
+	u16			tx_quick_cons_trip;
+	u16			tx_quick_cons_trip_int;
+	u16			rx_quick_cons_trip;
+	u16			rx_quick_cons_trip_int;
+	u16			comp_prod_trip;
+	u16			comp_prod_trip_int;
+	u16			tx_ticks;
+	u16			tx_ticks_int;
+	u16			com_ticks;
+	u16			com_ticks_int;
+	u16			cmd_ticks;
+	u16			cmd_ticks_int;
+	u16			rx_ticks;
+	u16			rx_ticks_int;
+
+	u32			stats_ticks;
+
+	dma_addr_t		status_blk_mapping;
+
+	struct statistics_block	*stats_blk;
+	struct statistics_block	*temp_stats_blk;
+	dma_addr_t		stats_blk_mapping;
+
+	int			ctx_pages;
+	void			*ctx_blk[4];
+	dma_addr_t		ctx_blk_mapping[4];
+
+	u32			hc_cmd;
+	u32			rx_mode;
+
+	u16			req_line_speed;
+	u8			req_duplex;
+
+	u8			phy_port;
+	u8			link_up;
+
+	u16			line_speed;
+	u8			duplex;
+	u8			flow_ctrl;	/* actual flow ctrl settings */
+						/* may be different from     */
+						/* req_flow_ctrl if autoneg  */
+	u32			advertising;
+
+	u8			req_flow_ctrl;	/* flow ctrl advertisement */
+						/* settings or forced      */
+						/* settings                */
+	u8			autoneg;
+#define AUTONEG_SPEED		1
+#define AUTONEG_FLOW_CTRL	2
+
+	u8			loopback;
+#define MAC_LOOPBACK		1
+#define PHY_LOOPBACK		2
+
+	u8			serdes_an_pending;
+
+	u8			mac_addr[8];
+
+	u32			shmem_base;
+
+	char			fw_version[32];
+
+	int			pm_cap;
+	int			pcix_cap;
+
+	const struct flash_spec	*flash_info;
+	u32			flash_size;
+
+	int			status_stats_size;
+
+	struct bnx2_irq		irq_tbl[BNX2_MAX_MSIX_VEC];
+	int			irq_nvecs;
+
+	u8			num_tx_rings;
+	u8			num_rx_rings;
+
+	u32 			leds_save;
+	u32			idle_chk_status_idx;
+
+#ifdef BCM_CNIC
+	struct mutex		cnic_lock;
+	struct cnic_eth_dev	cnic_eth_dev;
+#endif
+
+	const struct firmware	*mips_firmware;
+	const struct firmware	*rv2p_firmware;
+};
+
+#define REG_RD(bp, offset)					\
+	readl(bp->regview + offset)
+
+#define REG_WR(bp, offset, val)					\
+	writel(val, bp->regview + offset)
+
+#define REG_WR16(bp, offset, val)				\
+	writew(val, bp->regview + offset)
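+
+/* Usage sketch: these wrap MMIO accesses through BAR0, e.g. reading
+ * the chip id during probe (BNX2_MISC_ID holds the chip id register):
+ *
+ *	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
+ */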
+
+struct cpu_reg {
+	u32 mode;
+	u32 mode_value_halt;
+	u32 mode_value_sstep;
+
+	u32 state;
+	u32 state_value_clear;
+
+	u32 gpr0;
+	u32 evmask;
+	u32 pc;
+	u32 inst;
+	u32 bp;
+
+	u32 spad_base;
+
+	u32 mips_view_base;
+};
+
+struct bnx2_fw_file_section {
+	__be32 addr;
+	__be32 len;
+	__be32 offset;
+};
+
+struct bnx2_mips_fw_file_entry {
+	__be32 start_addr;
+	struct bnx2_fw_file_section text;
+	struct bnx2_fw_file_section data;
+	struct bnx2_fw_file_section rodata;
+};
+
+struct bnx2_rv2p_fw_file_entry {
+	struct bnx2_fw_file_section rv2p;
+	__be32 fixup[8];
+};
+
+struct bnx2_mips_fw_file {
+	struct bnx2_mips_fw_file_entry com;
+	struct bnx2_mips_fw_file_entry cp;
+	struct bnx2_mips_fw_file_entry rxp;
+	struct bnx2_mips_fw_file_entry tpat;
+	struct bnx2_mips_fw_file_entry txp;
+};
+
+struct bnx2_rv2p_fw_file {
+	struct bnx2_rv2p_fw_file_entry proc1;
+	struct bnx2_rv2p_fw_file_entry proc2;
+};
+
+#define RV2P_P1_FIXUP_PAGE_SIZE_IDX		0
+#define RV2P_BD_PAGE_SIZE_MSK			0xffff
+#define RV2P_BD_PAGE_SIZE			((BCM_PAGE_SIZE / 16) - 1)
+
+#define RV2P_PROC1                              0
+#define RV2P_PROC2                              1
+
+
+/* This value (in milliseconds) determines how often the driver issues
+ * the PULSE message code.  The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define BNX2_DRV_PULSE_PERIOD_MS                 250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out.  Once
+ * the wait has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define BNX2_FW_ACK_TIME_OUT_MS                  1000
+
+
+#define BNX2_DRV_RESET_SIGNATURE		0x00000000
+#define BNX2_DRV_RESET_SIGNATURE_MAGIC		 0x4841564b /* HAVK */
+/* #define DRV_RESET_SIGNATURE_MAGIC	 0x47495352 (RSIG) */
+
+#define BNX2_DRV_MB				0x00000004
+#define BNX2_DRV_MSG_CODE			 0xff000000
+#define BNX2_DRV_MSG_CODE_RESET			 0x01000000
+#define BNX2_DRV_MSG_CODE_UNLOAD		 0x02000000
+#define BNX2_DRV_MSG_CODE_SHUTDOWN		 0x03000000
+#define BNX2_DRV_MSG_CODE_SUSPEND_WOL		 0x04000000
+#define BNX2_DRV_MSG_CODE_FW_TIMEOUT		 0x05000000
+#define BNX2_DRV_MSG_CODE_PULSE			 0x06000000
+#define BNX2_DRV_MSG_CODE_DIAG			 0x07000000
+#define BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL	 0x09000000
+#define BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN		 0x0b000000
+#define BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE	 0x0d000000
+#define BNX2_DRV_MSG_CODE_CMD_SET_LINK		 0x10000000
+
+#define BNX2_DRV_MSG_DATA			 0x00ff0000
+#define BNX2_DRV_MSG_DATA_WAIT0			 0x00010000
+#define BNX2_DRV_MSG_DATA_WAIT1			 0x00020000
+#define BNX2_DRV_MSG_DATA_WAIT2			 0x00030000
+#define BNX2_DRV_MSG_DATA_WAIT3			 0x00040000
+
+#define BNX2_DRV_MSG_SEQ			 0x0000ffff
+
+#define BNX2_FW_MB				0x00000008
+#define BNX2_FW_MSG_ACK				 0x0000ffff
+#define BNX2_FW_MSG_STATUS_MASK			 0x00ff0000
+#define BNX2_FW_MSG_STATUS_OK			 0x00000000
+#define BNX2_FW_MSG_STATUS_FAILURE		 0x00ff0000
+
+#define BNX2_LINK_STATUS			0x0000000c
+#define BNX2_LINK_STATUS_INIT_VALUE		 0xffffffff
+#define BNX2_LINK_STATUS_LINK_UP		 0x1
+#define BNX2_LINK_STATUS_LINK_DOWN		 0x0
+#define BNX2_LINK_STATUS_SPEED_MASK		 0x1e
+#define BNX2_LINK_STATUS_AN_INCOMPLETE		 (0<<1)
+#define BNX2_LINK_STATUS_10HALF			 (1<<1)
+#define BNX2_LINK_STATUS_10FULL			 (2<<1)
+#define BNX2_LINK_STATUS_100HALF		 (3<<1)
+#define BNX2_LINK_STATUS_100BASE_T4		 (4<<1)
+#define BNX2_LINK_STATUS_100FULL		 (5<<1)
+#define BNX2_LINK_STATUS_1000HALF		 (6<<1)
+#define BNX2_LINK_STATUS_1000FULL		 (7<<1)
+#define BNX2_LINK_STATUS_2500HALF		 (8<<1)
+#define BNX2_LINK_STATUS_2500FULL		 (9<<1)
+#define BNX2_LINK_STATUS_AN_ENABLED		 (1<<5)
+#define BNX2_LINK_STATUS_AN_COMPLETE		 (1<<6)
+#define BNX2_LINK_STATUS_PARALLEL_DET		 (1<<7)
+#define BNX2_LINK_STATUS_RESERVED		 (1<<8)
+#define BNX2_LINK_STATUS_PARTNER_AD_1000FULL	 (1<<9)
+#define BNX2_LINK_STATUS_PARTNER_AD_1000HALF	 (1<<10)
+#define BNX2_LINK_STATUS_PARTNER_AD_100BT4	 (1<<11)
+#define BNX2_LINK_STATUS_PARTNER_AD_100FULL	 (1<<12)
+#define BNX2_LINK_STATUS_PARTNER_AD_100HALF	 (1<<13)
+#define BNX2_LINK_STATUS_PARTNER_AD_10FULL	 (1<<14)
+#define BNX2_LINK_STATUS_PARTNER_AD_10HALF	 (1<<15)
+#define BNX2_LINK_STATUS_TX_FC_ENABLED		 (1<<16)
+#define BNX2_LINK_STATUS_RX_FC_ENABLED		 (1<<17)
+#define BNX2_LINK_STATUS_PARTNER_SYM_PAUSE_CAP	 (1<<18)
+#define BNX2_LINK_STATUS_PARTNER_ASYM_PAUSE_CAP	 (1<<19)
+#define BNX2_LINK_STATUS_SERDES_LINK		 (1<<20)
+#define BNX2_LINK_STATUS_PARTNER_AD_2500FULL	 (1<<21)
+#define BNX2_LINK_STATUS_PARTNER_AD_2500HALF	 (1<<22)
+#define BNX2_LINK_STATUS_HEART_BEAT_EXPIRED	 (1<<31)
+
+#define BNX2_DRV_PULSE_MB			0x00000010
+#define BNX2_DRV_PULSE_SEQ_MASK			 0x00007fff
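+
+/* A minimal sketch of the periodic pulse, assuming the driver's
+ * bnx2_shmem_wr() helper: every BNX2_DRV_PULSE_PERIOD_MS the driver
+ * bumps a sequence number and writes it to the pulse mailbox so the
+ * firmware knows the OS is still alive:
+ *
+ *	u32 msg = ++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK;
+ *	bnx2_shmem_wr(bp, BNX2_DRV_PULSE_MB, msg);
+ */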
+
+/* Indicate to the firmware not to go into the
+ * OS-absent mode when it is not getting the driver pulse.
+ * This is used for debugging. */
+#define BNX2_DRV_MSG_DATA_PULSE_CODE_ALWAYS_ALIVE	 0x00080000
+
+#define BNX2_DRV_MB_ARG0			0x00000014
+#define BNX2_NETLINK_SET_LINK_SPEED_10HALF	 (1<<0)
+#define BNX2_NETLINK_SET_LINK_SPEED_10FULL	 (1<<1)
+#define BNX2_NETLINK_SET_LINK_SPEED_10		 \
+	(BNX2_NETLINK_SET_LINK_SPEED_10HALF |	 \
+	 BNX2_NETLINK_SET_LINK_SPEED_10FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_100HALF	 (1<<2)
+#define BNX2_NETLINK_SET_LINK_SPEED_100FULL	 (1<<3)
+#define BNX2_NETLINK_SET_LINK_SPEED_100		 \
+	(BNX2_NETLINK_SET_LINK_SPEED_100HALF |	 \
+	 BNX2_NETLINK_SET_LINK_SPEED_100FULL)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GHALF	 (1<<4)
+#define BNX2_NETLINK_SET_LINK_SPEED_1GFULL	 (1<<5)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5HALF	 (1<<6)
+#define BNX2_NETLINK_SET_LINK_SPEED_2G5FULL	 (1<<7)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GHALF	 (1<<8)
+#define BNX2_NETLINK_SET_LINK_SPEED_10GFULL	 (1<<9)
+#define BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG	 (1<<10)
+#define BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE	 (1<<11)
+#define BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE	 (1<<12)
+#define BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE	 (1<<13)
+#define BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED	 (1<<14)
+#define BNX2_NETLINK_SET_LINK_PHY_RESET		 (1<<15)
+
+#define BNX2_DEV_INFO_SIGNATURE			0x00000020
+#define BNX2_DEV_INFO_SIGNATURE_MAGIC		 0x44564900
+#define BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK	 0xffffff00
+#define BNX2_DEV_INFO_FEATURE_CFG_VALID		 0x01
+#define BNX2_DEV_INFO_SECONDARY_PORT		 0x80
+#define BNX2_DEV_INFO_DRV_ALWAYS_ALIVE		 0x40
+
+#define BNX2_SHARED_HW_CFG_PART_NUM		0x00000024
+
+#define BNX2_SHARED_HW_CFG_POWER_DISSIPATED	0x00000034
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D3_MASK	 0xff000000
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D2_MASK	 0xff0000
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D1_MASK	 0xff00
+#define BNX2_SHARED_HW_CFG_POWER_STATE_D0_MASK	 0xff
+
+#define BNX2_SHARED_HW_CFG_POWER_CONSUMED	0x00000038
+#define BNX2_SHARED_HW_CFG_CONFIG		0x0000003c
+#define BNX2_SHARED_HW_CFG_DESIGN_NIC		 0
+#define BNX2_SHARED_HW_CFG_DESIGN_LOM		 0x1
+#define BNX2_SHARED_HW_CFG_PHY_COPPER		 0
+#define BNX2_SHARED_HW_CFG_PHY_FIBER		 0x2
+#define BNX2_SHARED_HW_CFG_PHY_2_5G		 0x20
+#define BNX2_SHARED_HW_CFG_PHY_BACKPLANE	 0x40
+#define BNX2_SHARED_HW_CFG_LED_MODE_SHIFT_BITS	 8
+#define BNX2_SHARED_HW_CFG_LED_MODE_MASK	 0x300
+#define BNX2_SHARED_HW_CFG_LED_MODE_MAC		 0
+#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY1	 0x100
+#define BNX2_SHARED_HW_CFG_LED_MODE_GPHY2	 0x200
+#define BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX	 0x8000
+
+#define BNX2_SHARED_HW_CFG_CONFIG2		0x00000040
+#define BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK	 0x00fff000
+
+#define BNX2_DEV_INFO_BC_REV			0x0000004c
+
+#define BNX2_PORT_HW_CFG_MAC_UPPER		0x00000050
+#define BNX2_PORT_HW_CFG_UPPERMAC_MASK		 0xffff
+
+#define BNX2_PORT_HW_CFG_MAC_LOWER		0x00000054
+#define BNX2_PORT_HW_CFG_CONFIG			0x00000058
+#define BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK	 0x0000ffff
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK	 0x001f0000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_AN	 0x00000000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G	 0x00030000
+#define BNX2_PORT_HW_CFG_CFG_DFLT_LINK_2_5G	 0x00040000
+
+#define BNX2_PORT_HW_CFG_IMD_MAC_A_UPPER	0x00000068
+#define BNX2_PORT_HW_CFG_IMD_MAC_A_LOWER	0x0000006c
+#define BNX2_PORT_HW_CFG_IMD_MAC_B_UPPER	0x00000070
+#define BNX2_PORT_HW_CFG_IMD_MAC_B_LOWER	0x00000074
+#define BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER	0x00000078
+#define BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER	0x0000007c
+
+#define BNX2_DEV_INFO_PER_PORT_HW_CONFIG2	0x000000b4
+
+#define BNX2_DEV_INFO_FORMAT_REV		0x000000c4
+#define BNX2_DEV_INFO_FORMAT_REV_MASK		 0xff000000
+#define BNX2_DEV_INFO_FORMAT_REV_ID		 ('A' << 24)
+
+#define BNX2_SHARED_FEATURE			0x000000c8
+#define BNX2_SHARED_FEATURE_MASK		 0xffffffff
+
+#define BNX2_PORT_FEATURE			0x000000d8
+#define BNX2_PORT2_FEATURE			0x0000014c
+#define BNX2_PORT_FEATURE_WOL_ENABLED		 0x01000000
+#define BNX2_PORT_FEATURE_MBA_ENABLED		 0x02000000
+#define BNX2_PORT_FEATURE_ASF_ENABLED		 0x04000000
+#define BNX2_PORT_FEATURE_IMD_ENABLED		 0x08000000
+#define BNX2_PORT_FEATURE_BAR1_SIZE_MASK	 0xf
+#define BNX2_PORT_FEATURE_BAR1_SIZE_DISABLED	 0x0
+#define BNX2_PORT_FEATURE_BAR1_SIZE_64K		 0x1
+#define BNX2_PORT_FEATURE_BAR1_SIZE_128K	 0x2
+#define BNX2_PORT_FEATURE_BAR1_SIZE_256K	 0x3
+#define BNX2_PORT_FEATURE_BAR1_SIZE_512K	 0x4
+#define BNX2_PORT_FEATURE_BAR1_SIZE_1M		 0x5
+#define BNX2_PORT_FEATURE_BAR1_SIZE_2M		 0x6
+#define BNX2_PORT_FEATURE_BAR1_SIZE_4M		 0x7
+#define BNX2_PORT_FEATURE_BAR1_SIZE_8M		 0x8
+#define BNX2_PORT_FEATURE_BAR1_SIZE_16M		 0x9
+#define BNX2_PORT_FEATURE_BAR1_SIZE_32M		 0xa
+#define BNX2_PORT_FEATURE_BAR1_SIZE_64M		 0xb
+#define BNX2_PORT_FEATURE_BAR1_SIZE_128M	 0xc
+#define BNX2_PORT_FEATURE_BAR1_SIZE_256M	 0xd
+#define BNX2_PORT_FEATURE_BAR1_SIZE_512M	 0xe
+#define BNX2_PORT_FEATURE_BAR1_SIZE_1G		 0xf
+
+#define BNX2_PORT_FEATURE_WOL			0xdc
+#define BNX2_PORT2_FEATURE_WOL			0x150
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_SHIFT_BITS	 4
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MASK	 0x30
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_DISABLE	 0
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC	 0x10
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_ACPI	 0x20
+#define BNX2_PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI	 0x30
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_MASK	 0xf
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_AUTONEG	 0
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10HALF	 1
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_10FULL	 2
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100HALF 3
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_100FULL 4
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000HALF	 5
+#define BNX2_PORT_FEATURE_WOL_LINK_SPEED_1000FULL	 6
+#define BNX2_PORT_FEATURE_WOL_AUTONEG_ADVERTISE_1000	 0x40
+#define BNX2_PORT_FEATURE_WOL_RESERVED_PAUSE_CAP 0x400
+#define BNX2_PORT_FEATURE_WOL_RESERVED_ASYM_PAUSE_CAP	 0x800
+
+#define BNX2_PORT_FEATURE_MBA			0xe0
+#define BNX2_PORT2_FEATURE_MBA			0x154
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT_BITS	 0
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK	 0x3
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE	 0
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL	 1
+#define BNX2_PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP	 2
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_SHIFT_BITS	 2
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_MASK	 0x3c
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_AUTONEG	 0
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10HALF	 0x4
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_10FULL	 0x8
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100HALF	 0xc
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_100FULL	 0x10
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000HALF	 0x14
+#define BNX2_PORT_FEATURE_MBA_LINK_SPEED_1000FULL	 0x18
+#define BNX2_PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE	 0x40
+#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_S	 0
+#define BNX2_PORT_FEATURE_MBA_HOTKEY_CTRL_B	 0x80
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT_BITS	 8
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK	 0xff00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED	 0
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1K	 0x100
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2K	 0x200
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4K	 0x300
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8K	 0x400
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16K	 0x500
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_32K	 0x600
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_64K	 0x700
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_128K	 0x800
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_256K	 0x900
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_512K	 0xa00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_1M	 0xb00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_2M	 0xc00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_4M	 0xd00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_8M	 0xe00
+#define BNX2_PORT_FEATURE_MBA_EXP_ROM_SIZE_16M	 0xf00
+#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT_BITS	 16
+#define BNX2_PORT_FEATURE_MBA_MSG_TIMEOUT_MASK	 0xf0000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT_BITS	 20
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK	 0x300000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO	 0
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS	 0x100000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H	 0x200000
+#define BNX2_PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H	 0x300000
+
+#define BNX2_PORT_FEATURE_IMD			0xe4
+#define BNX2_PORT2_FEATURE_IMD			0x158
+#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_DEFAULT	 0
+#define BNX2_PORT_FEATURE_IMD_LINK_OVERRIDE_ENABLE	 1
+
+#define BNX2_PORT_FEATURE_VLAN			0xe8
+#define BNX2_PORT2_FEATURE_VLAN			0x15c
+#define BNX2_PORT_FEATURE_MBA_VLAN_TAG_MASK	 0xffff
+#define BNX2_PORT_FEATURE_MBA_VLAN_ENABLE	 0x10000
+
+#define BNX2_MFW_VER_PTR			0x0000014c
+
+#define BNX2_BC_STATE_RESET_TYPE		0x000001c0
+#define BNX2_BC_STATE_RESET_TYPE_SIG		 0x00005254
+#define BNX2_BC_STATE_RESET_TYPE_SIG_MASK	 0x0000ffff
+#define BNX2_BC_STATE_RESET_TYPE_NONE	 (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  0x00010000)
+#define BNX2_BC_STATE_RESET_TYPE_PCI	 (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  0x00020000)
+#define BNX2_BC_STATE_RESET_TYPE_VAUX	 (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  0x00030000)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_MASK	 DRV_MSG_CODE
+#define BNX2_BC_STATE_RESET_TYPE_DRV_RESET (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					    DRV_MSG_CODE_RESET)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_UNLOAD (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					     DRV_MSG_CODE_UNLOAD)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_SHUTDOWN (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					       DRV_MSG_CODE_SHUTDOWN)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_WOL (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					  DRV_MSG_CODE_WOL)
+#define BNX2_BC_STATE_RESET_TYPE_DRV_DIAG (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					   DRV_MSG_CODE_DIAG)
+#define BNX2_BC_STATE_RESET_TYPE_VALUE(msg) (BNX2_BC_STATE_RESET_TYPE_SIG | \
+					     (msg))
+
+#define BNX2_BC_STATE				0x000001c4
+#define BNX2_BC_STATE_ERR_MASK			 0x0000ff00
+#define BNX2_BC_STATE_SIGN			 0x42530000
+#define BNX2_BC_STATE_SIGN_MASK			 0xffff0000
+#define BNX2_BC_STATE_BC1_START			 (BNX2_BC_STATE_SIGN | 0x1)
+#define BNX2_BC_STATE_GET_NVM_CFG1		 (BNX2_BC_STATE_SIGN | 0x2)
+#define BNX2_BC_STATE_PROG_BAR			 (BNX2_BC_STATE_SIGN | 0x3)
+#define BNX2_BC_STATE_INIT_VID			 (BNX2_BC_STATE_SIGN | 0x4)
+#define BNX2_BC_STATE_GET_NVM_CFG2		 (BNX2_BC_STATE_SIGN | 0x5)
+#define BNX2_BC_STATE_APPLY_WKARND		 (BNX2_BC_STATE_SIGN | 0x6)
+#define BNX2_BC_STATE_LOAD_BC2			 (BNX2_BC_STATE_SIGN | 0x7)
+#define BNX2_BC_STATE_GOING_BC2			 (BNX2_BC_STATE_SIGN | 0x8)
+#define BNX2_BC_STATE_GOING_DIAG		 (BNX2_BC_STATE_SIGN | 0x9)
+#define BNX2_BC_STATE_RT_FINAL_INIT		 (BNX2_BC_STATE_SIGN | 0x81)
+#define BNX2_BC_STATE_RT_WKARND			 (BNX2_BC_STATE_SIGN | 0x82)
+#define BNX2_BC_STATE_RT_DRV_PULSE		 (BNX2_BC_STATE_SIGN | 0x83)
+#define BNX2_BC_STATE_RT_FIOEVTS		 (BNX2_BC_STATE_SIGN | 0x84)
+#define BNX2_BC_STATE_RT_DRV_CMD		 (BNX2_BC_STATE_SIGN | 0x85)
+#define BNX2_BC_STATE_RT_LOW_POWER		 (BNX2_BC_STATE_SIGN | 0x86)
+#define BNX2_BC_STATE_RT_SET_WOL		 (BNX2_BC_STATE_SIGN | 0x87)
+#define BNX2_BC_STATE_RT_OTHER_FW		 (BNX2_BC_STATE_SIGN | 0x88)
+#define BNX2_BC_STATE_RT_GOING_D3		 (BNX2_BC_STATE_SIGN | 0x89)
+#define BNX2_BC_STATE_ERR_BAD_VERSION		 (BNX2_BC_STATE_SIGN | 0x0100)
+#define BNX2_BC_STATE_ERR_BAD_BC2_CRC		 (BNX2_BC_STATE_SIGN | 0x0200)
+#define BNX2_BC_STATE_ERR_BC1_LOOP		 (BNX2_BC_STATE_SIGN | 0x0300)
+#define BNX2_BC_STATE_ERR_UNKNOWN_CMD		 (BNX2_BC_STATE_SIGN | 0x0400)
+#define BNX2_BC_STATE_ERR_DRV_DEAD		 (BNX2_BC_STATE_SIGN | 0x0500)
+#define BNX2_BC_STATE_ERR_NO_RXP		 (BNX2_BC_STATE_SIGN | 0x0600)
+#define BNX2_BC_STATE_ERR_TOO_MANY_RBUF		 (BNX2_BC_STATE_SIGN | 0x0700)
+
+#define BNX2_BC_STATE_CONDITION			0x000001c8
+#define BNX2_CONDITION_MFW_RUN_UNKNOWN		 0x00000000
+#define BNX2_CONDITION_MFW_RUN_IPMI		 0x00002000
+#define BNX2_CONDITION_MFW_RUN_UMP		 0x00004000
+#define BNX2_CONDITION_MFW_RUN_NCSI		 0x00006000
+#define BNX2_CONDITION_MFW_RUN_NONE		 0x0000e000
+#define BNX2_CONDITION_MFW_RUN_MASK		 0x0000e000
+
+#define BNX2_BC_STATE_DEBUG_CMD			0x1dc
+#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE	 0x42440000
+#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE_MASK	 0xffff0000
+#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_CNT_MASK	 0xffff
+#define BNX2_BC_STATE_BC_DBG_CMD_LOOP_INFINITE	 0xffff
+
+#define BNX2_FW_EVT_CODE_MB			0x354
+#define BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT 0x00000000
+#define BNX2_FW_EVT_CODE_LINK_EVENT		 0x00000001
+
+#define BNX2_DRV_ACK_CAP_MB			0x364
+#define BNX2_DRV_ACK_CAP_SIGNATURE		 0x35450000
+#define BNX2_CAPABILITY_SIGNATURE_MASK		 0xffff0000
+
+#define BNX2_FW_CAP_MB				0x368
+#define BNX2_FW_CAP_SIGNATURE			 0xaa550000
+#define BNX2_FW_ACK_DRV_SIGNATURE		 0x52500000
+#define BNX2_FW_CAP_SIGNATURE_MASK		 0xffff0000
+#define BNX2_FW_CAP_REMOTE_PHY_CAPABLE		 0x00000001
+#define BNX2_FW_CAP_REMOTE_PHY_PRESENT		 0x00000002
+#define BNX2_FW_CAP_MFW_CAN_KEEP_VLAN		 0x00000008
+#define BNX2_FW_CAP_BC_CAN_KEEP_VLAN		 0x00000010
+#define BNX2_FW_CAP_CAN_KEEP_VLAN	(BNX2_FW_CAP_BC_CAN_KEEP_VLAN | \
+					 BNX2_FW_CAP_MFW_CAN_KEEP_VLAN)
+
+#define BNX2_RPHY_SIGNATURE			0x36c
+#define BNX2_RPHY_LOAD_SIGNATURE		 0x5a5a5a5a
+
+#define BNX2_RPHY_FLAGS				0x370
+#define BNX2_RPHY_SERDES_LINK			0x374
+#define BNX2_RPHY_COPPER_LINK			0x378
+
+#define BNX2_ISCSI_INITIATOR			0x3dc
+#define BNX2_ISCSI_INITIATOR_EN			 0x00080000
+
+#define BNX2_ISCSI_MAX_CONN			0x3e4
+#define BNX2_ISCSI_MAX_CONN_MASK		 0xffff0000
+#define BNX2_ISCSI_MAX_CONN_SHIFT		 16
+
+#define HOST_VIEW_SHMEM_BASE			0x167c00
+
+#define DP_SHMEM_LINE(bp, offset)					\
+	netdev_err(bp->dev, "DEBUG: %08x: %08x %08x %08x %08x\n",	\
+		   offset,						\
+		   bnx2_shmem_rd(bp, offset),				\
+		   bnx2_shmem_rd(bp, offset + 4),			\
+		   bnx2_shmem_rd(bp, offset + 8),			\
+		   bnx2_shmem_rd(bp, offset + 12))
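+
+/* Debugging sketch: dump four consecutive shmem words, e.g. around
+ * the driver mailbox:
+ *
+ *	DP_SHMEM_LINE(bp, BNX2_DRV_MB);
+ */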
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/bnx2_fw.h b/drivers/net/ethernet/broadcom/bnx2_fw.h
new file mode 100644
index 0000000..940eb91
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2_fw.h
@@ -0,0 +1,88 @@
+/* bnx2_fw.h: Broadcom NX2 network driver.
+ *
+ * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+/* Initialized Values for the Completion Processor. */
+static const struct cpu_reg cpu_reg_com = {
+	.mode = BNX2_COM_CPU_MODE,
+	.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA,
+	.state = BNX2_COM_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_COM_CPU_REG_FILE,
+	.evmask = BNX2_COM_CPU_EVENT_MASK,
+	.pc = BNX2_COM_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_COM_CPU_INSTRUCTION,
+	.bp = BNX2_COM_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_COM_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the Command Processor. */
+static const struct cpu_reg cpu_reg_cp = {
+	.mode = BNX2_CP_CPU_MODE,
+	.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA,
+	.state = BNX2_CP_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_CP_CPU_REG_FILE,
+	.evmask = BNX2_CP_CPU_EVENT_MASK,
+	.pc = BNX2_CP_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_CP_CPU_INSTRUCTION,
+	.bp = BNX2_CP_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_CP_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the RX Processor. */
+static const struct cpu_reg cpu_reg_rxp = {
+	.mode = BNX2_RXP_CPU_MODE,
+	.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA,
+	.state = BNX2_RXP_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_RXP_CPU_REG_FILE,
+	.evmask = BNX2_RXP_CPU_EVENT_MASK,
+	.pc = BNX2_RXP_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_RXP_CPU_INSTRUCTION,
+	.bp = BNX2_RXP_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_RXP_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the TX Patch-up Processor. */
+static const struct cpu_reg cpu_reg_tpat = {
+	.mode = BNX2_TPAT_CPU_MODE,
+	.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA,
+	.state = BNX2_TPAT_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_TPAT_CPU_REG_FILE,
+	.evmask = BNX2_TPAT_CPU_EVENT_MASK,
+	.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_TPAT_CPU_INSTRUCTION,
+	.bp = BNX2_TPAT_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_TPAT_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
+
+/* Initialized Values for the TX Processor. */
+static const struct cpu_reg cpu_reg_txp = {
+	.mode = BNX2_TXP_CPU_MODE,
+	.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT,
+	.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA,
+	.state = BNX2_TXP_CPU_STATE,
+	.state_value_clear = 0xffffff,
+	.gpr0 = BNX2_TXP_CPU_REG_FILE,
+	.evmask = BNX2_TXP_CPU_EVENT_MASK,
+	.pc = BNX2_TXP_CPU_PROGRAM_COUNTER,
+	.inst = BNX2_TXP_CPU_INSTRUCTION,
+	.bp = BNX2_TXP_CPU_HW_BREAKPOINT,
+	.spad_base = BNX2_TXP_SCRATCH,
+	.mips_view_base = 0x8000000,
+};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/Makefile b/drivers/net/ethernet/broadcom/bnx2x/Makefile
new file mode 100644
index 0000000..48fbdd4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for Broadcom 10-Gigabit ethernet driver
+#
+
+obj-$(CONFIG_BNX2X) += bnx2x.o
+
+bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o bnx2x_sp.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
new file mode 100644
index 0000000..c423504
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -0,0 +1,2014 @@
+/* bnx2x.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ */
+
+#ifndef BNX2X_H
+#define BNX2X_H
+#include <linux/netdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/types.h>
+
+/* compilation time flags */
+
+/* define this to make the driver freeze on error to allow getting debug info
+ * (you will need to reboot afterwards) */
+/* #define BNX2X_STOP_ON_ERROR */
+
+#define DRV_MODULE_VERSION      "1.70.00-0"
+#define DRV_MODULE_RELDATE      "2011/06/13"
+#define BNX2X_BC_VER            0x040200
+
+#if defined(CONFIG_DCB)
+#define BCM_DCBNL
+#endif
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "../cnic_if.h"
+#endif
+
+#ifdef BCM_CNIC
+#define BNX2X_MIN_MSIX_VEC_CNT 3
+#define BNX2X_MSIX_VEC_FP_START 2
+#else
+#define BNX2X_MIN_MSIX_VEC_CNT 2
+#define BNX2X_MSIX_VEC_FP_START 1
+#endif
+
+#include <linux/mdio.h>
+
+#include "bnx2x_reg.h"
+#include "bnx2x_fw_defs.h"
+#include "bnx2x_hsi.h"
+#include "bnx2x_link.h"
+#include "bnx2x_sp.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_stats.h"
+
+/* error/debug prints */
+
+#define DRV_MODULE_NAME		"bnx2x"
+
+/* for messages that are currently off */
+#define BNX2X_MSG_OFF			0
+#define BNX2X_MSG_MCP			0x010000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS			0x020000 /* was: NETIF_MSG_TIMER */
+#define BNX2X_MSG_NVM			0x040000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_DMAE			0x080000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP			0x100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP			0x200000 /* was: NETIF_MSG_INTR */
+
+#define DP_LEVEL			KERN_NOTICE	/* was: KERN_DEBUG */
+
+/* regular debug print */
+#define DP(__mask, __fmt, __args...)				\
+do {								\
+	if (bp->msg_enable & (__mask))				\
+		printk(DP_LEVEL "[%s:%d(%s)]" __fmt,		\
+		       __func__, __LINE__,			\
+		       bp->dev ? (bp->dev->name) : "?",		\
+		       ##__args);				\
+} while (0)
+
+#define DP_CONT(__mask, __fmt, __args...)			\
+do {								\
+	if (bp->msg_enable & (__mask))				\
+		pr_cont(__fmt, ##__args);			\
+} while (0)
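+
+/* A typical call site, gated on the corresponding bit in
+ * bp->msg_enable (the macro relies on a local 'bp'; 'cid' here is
+ * just an illustrative argument):
+ *
+ *	DP(BNX2X_MSG_SP, "got ramrod completion, cid %d\n", cid);
+ */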
+
+/* errors debug print */
+#define BNX2X_DBG_ERR(__fmt, __args...)				\
+do {								\
+	if (netif_msg_probe(bp))				\
+		pr_err("[%s:%d(%s)]" __fmt,			\
+		       __func__, __LINE__,			\
+		       bp->dev ? (bp->dev->name) : "?",		\
+		       ##__args);				\
+} while (0)
+
+/* for errors (never masked) */
+#define BNX2X_ERR(__fmt, __args...)				\
+do {								\
+	pr_err("[%s:%d(%s)]" __fmt,				\
+	       __func__, __LINE__,				\
+	       bp->dev ? (bp->dev->name) : "?",			\
+	       ##__args);					\
+	} while (0)
+
+#define BNX2X_ERROR(__fmt, __args...) do { \
+	pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
+	} while (0)
+
+
+/* before we have a dev->name use dev_info() */
+#define BNX2X_DEV_INFO(__fmt, __args...)			 \
+do {								 \
+	if (netif_msg_probe(bp))				 \
+		dev_info(&bp->pdev->dev, __fmt, ##__args);	 \
+} while (0)
+
+#define BNX2X_MAC_FMT		"%pM"
+#define BNX2X_MAC_PRN_LIST(mac)	(mac)
+
+
+#ifdef BNX2X_STOP_ON_ERROR
+void bnx2x_int_disable(struct bnx2x *bp);
+#define bnx2x_panic() do { \
+		bp->panic = 1; \
+		BNX2X_ERR("driver assert\n"); \
+		bnx2x_int_disable(bp); \
+		bnx2x_panic_dump(bp); \
+	} while (0)
+#else
+#define bnx2x_panic() do { \
+		bp->panic = 1; \
+		BNX2X_ERR("driver assert\n"); \
+		bnx2x_panic_dump(bp); \
+	} while (0)
+#endif
+
+#define bnx2x_mc_addr(ha)      ((ha)->addr)
+#define bnx2x_uc_addr(ha)      ((ha)->addr)
+
+#define U64_LO(x)			(u32)(((u64)(x)) & 0xffffffff)
+#define U64_HI(x)			(u32)(((u64)(x)) >> 32)
+#define HILO_U64(hi, lo)		((((u64)(hi)) << 32) + (lo))
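+
+/* e.g. for a 64-bit DMA address 0x00000001a2b3c4d8, U64_HI() yields
+ * 0x00000001 and U64_LO() yields 0xa2b3c4d8; HILO_U64() of those two
+ * halves reassembles the original value.
+ */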
+
+
+#define REG_ADDR(bp, offset)		((bp->regview) + (offset))
+
+#define REG_RD(bp, offset)		readl(REG_ADDR(bp, offset))
+#define REG_RD8(bp, offset)		readb(REG_ADDR(bp, offset))
+#define REG_RD16(bp, offset)		readw(REG_ADDR(bp, offset))
+
+#define REG_WR(bp, offset, val)		writel((u32)val, REG_ADDR(bp, offset))
+#define REG_WR8(bp, offset, val)	writeb((u8)val, REG_ADDR(bp, offset))
+#define REG_WR16(bp, offset, val)	writew((u16)val, REG_ADDR(bp, offset))
+
+#define REG_RD_IND(bp, offset)		bnx2x_reg_rd_ind(bp, offset)
+#define REG_WR_IND(bp, offset, val)	bnx2x_reg_wr_ind(bp, offset, val)
+
+#define REG_RD_DMAE(bp, offset, valp, len32) \
+	do { \
+		bnx2x_read_dmae(bp, offset, len32);\
+		memcpy(valp, bnx2x_sp(bp, wb_data[0]), (len32) * 4); \
+	} while (0)
+
+#define REG_WR_DMAE(bp, offset, valp, len32) \
+	do { \
+		memcpy(bnx2x_sp(bp, wb_data[0]), valp, (len32) * 4); \
+		bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), \
+				 offset, len32); \
+	} while (0)
+
+#define REG_WR_DMAE_LEN(bp, offset, valp, len32) \
+	REG_WR_DMAE(bp, offset, valp, len32)
+
+#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
+	do { \
+		memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
+		bnx2x_write_big_buf_wb(bp, addr, len32); \
+	} while (0)
+
+#define SHMEM_ADDR(bp, field)		(bp->common.shmem_base + \
+					 offsetof(struct shmem_region, field))
+#define SHMEM_RD(bp, field)		REG_RD(bp, SHMEM_ADDR(bp, field))
+#define SHMEM_WR(bp, field, val)	REG_WR(bp, SHMEM_ADDR(bp, field), val)
+
+#define SHMEM2_ADDR(bp, field)		(bp->common.shmem2_base + \
+					 offsetof(struct shmem2_region, field))
+#define SHMEM2_RD(bp, field)		REG_RD(bp, SHMEM2_ADDR(bp, field))
+#define SHMEM2_WR(bp, field, val)	REG_WR(bp, SHMEM2_ADDR(bp, field), val)
+#define MF_CFG_ADDR(bp, field)		(bp->common.mf_cfg_base + \
+					 offsetof(struct mf_cfg, field))
+#define MF2_CFG_ADDR(bp, field)		(bp->common.mf2_cfg_base + \
+					 offsetof(struct mf2_cfg, field))
+
+#define MF_CFG_RD(bp, field)		REG_RD(bp, MF_CFG_ADDR(bp, field))
+#define MF_CFG_WR(bp, field, val)	REG_WR(bp,\
+					       MF_CFG_ADDR(bp, field), (val))
+#define MF2_CFG_RD(bp, field)		REG_RD(bp, MF2_CFG_ADDR(bp, field))
+
+#define SHMEM2_HAS(bp, field)		((bp)->common.shmem2_base &&	\
+					 (SHMEM2_RD((bp), size) >	\
+					 offsetof(struct shmem2_region, field)))
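+
+/* Usage sketch: SHMEM2_HAS() guards reads of fields that older
+ * bootcodes do not provide, dcc_support being one such field:
+ *
+ *	if (SHMEM2_HAS(bp, dcc_support))
+ *		val = SHMEM2_RD(bp, dcc_support);
+ */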
+
+#define EMAC_RD(bp, reg)		REG_RD(bp, emac_base + reg)
+#define EMAC_WR(bp, reg, val)		REG_WR(bp, emac_base + reg, val)
+
+/* SP SB indices */
+
+/* General SP events - stats query, cfc delete, etc  */
+#define HC_SP_INDEX_ETH_DEF_CONS		3
+
+/* EQ completions */
+#define HC_SP_INDEX_EQ_CONS			7
+
+/* FCoE L2 connection completions */
+#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS		6
+#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS		4
+/* iSCSI L2 */
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS		5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS	1
+
+/* Special clients parameters */
+
+/* SB indices */
+/* FCoE L2 */
+#define BNX2X_FCOE_L2_RX_INDEX \
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
+
+#define BNX2X_FCOE_L2_TX_INDEX \
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
+
+/**
+ *  CIDs and CLIDs:
+ *  The CLIDs below are the CLIDs for func 0; the CLID for any
+ *  other function N is calculated by the formula:
+ *
+ *  FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X
+ *
+ */
+/* iSCSI L2 */
+#define BNX2X_ISCSI_ETH_CL_ID_IDX	1
+#define BNX2X_ISCSI_ETH_CID		49
+
+/* FCoE L2 */
+#define BNX2X_FCOE_ETH_CL_ID_IDX	2
+#define BNX2X_FCOE_ETH_CID		50
+
+/* Additional rings budgeting */
+#ifdef BCM_CNIC
+#define CNIC_PRESENT			1
+#define FCOE_PRESENT			1
+#else
+#define CNIC_PRESENT			0
+#define FCOE_PRESENT			0
+#endif /* BCM_CNIC */
+#define NON_ETH_CONTEXT_USE	(FCOE_PRESENT)
+
+#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
+	AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
+
+#define SM_RX_ID			0
+#define SM_TX_ID			1
+
+/* defines for multiple tx priority indices */
+#define FIRST_TX_ONLY_COS_INDEX		1
+#define FIRST_TX_COS_INDEX		0
+
+/* defines for decoding the fastpath index and the cos index from the
+ * transmission queue index
+ */
+#define MAX_TXQS_PER_COS	FP_SB_MAX_E1x
+
+#define TXQ_TO_FP(txq_index)	((txq_index) % MAX_TXQS_PER_COS)
+#define TXQ_TO_COS(txq_index)	((txq_index) / MAX_TXQS_PER_COS)
+
+/* rules for calculating the cids of tx-only connections */
+#define CID_TO_FP(cid)		((cid) % MAX_TXQS_PER_COS)
+#define CID_COS_TO_TX_ONLY_CID(cid, cos)	(cid + cos * MAX_TXQS_PER_COS)
+
+/* fp index inside class of service range */
+#define FP_COS_TO_TXQ(fp, cos)    ((fp)->index + cos * MAX_TXQS_PER_COS)
+
+/*
+ * 0..15 eth cos0
+ * 16..31 eth cos1 if applicable
+ * 32..47 eth cos2 if applicable
+ * fcoe queue follows eth queues (16, 32, 48 depending on cos)
+ */
+#define MAX_ETH_TXQ_IDX(bp)	(MAX_TXQS_PER_COS * (bp)->max_cos)
+#define FCOE_TXQ_IDX(bp)	(MAX_ETH_TXQ_IDX(bp))
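+
+/* Worked example with MAX_TXQS_PER_COS = 16: txq_index 18 gives
+ * TXQ_TO_FP(18) = 2 and TXQ_TO_COS(18) = 1, i.e. fastpath 2 in the
+ * 16..31 cos1 range above; with max_cos = 2, FCOE_TXQ_IDX(bp) = 32.
+ */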
+
+/* fast path */
+struct sw_rx_bd {
+	struct sk_buff	*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct sw_tx_bd {
+	struct sk_buff	*skb;
+	u16		first_bd;
+	u8		flags;
+/* Set on the first BD descriptor when there is a split BD */
+#define BNX2X_TSO_SPLIT_BD		(1<<0)
+};
+
+struct sw_rx_page {
+	struct page	*page;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+union db_prod {
+	struct doorbell_set_prod data;
+	u32		raw;
+};
+
+
+/* MC hsi */
+#define BCM_PAGE_SHIFT		12
+#define BCM_PAGE_SIZE		(1 << BCM_PAGE_SHIFT)
+#define BCM_PAGE_MASK		(~(BCM_PAGE_SIZE - 1))
+#define BCM_PAGE_ALIGN(addr)	(((addr) + BCM_PAGE_SIZE - 1) & BCM_PAGE_MASK)
+
+#define PAGES_PER_SGE_SHIFT	0
+#define PAGES_PER_SGE		(1 << PAGES_PER_SGE_SHIFT)
+#define SGE_PAGE_SIZE		PAGE_SIZE
+#define SGE_PAGE_SHIFT		PAGE_SHIFT
+#define SGE_PAGE_ALIGN(addr)	PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+
+/* SGE ring related macros */
+#define NUM_RX_SGE_PAGES	2
+#define RX_SGE_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_sge))
+#define MAX_RX_SGE_CNT		(RX_SGE_CNT - 2)
+/* RX_SGE_CNT is guaranteed to be a power of 2 */
+#define RX_SGE_MASK		(RX_SGE_CNT - 1)
+#define NUM_RX_SGE		(RX_SGE_CNT * NUM_RX_SGE_PAGES)
+#define MAX_RX_SGE		(NUM_RX_SGE - 1)
+#define NEXT_SGE_IDX(x)		((((x) & RX_SGE_MASK) == \
+				  (MAX_RX_SGE_CNT - 1)) ? (x) + 3 : (x) + 1)
+#define RX_SGE(x)		((x) & MAX_RX_SGE)
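+
+/* With 4K BCM pages and an 8-byte struct eth_rx_sge this means
+ * RX_SGE_CNT = 512 and MAX_RX_SGE_CNT = 510: the last two entries of
+ * each page hold the next-page pointer, so NEXT_SGE_IDX(509) skips
+ * ahead to 512, the first entry of the next page.
+ */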
+
+/* Manipulate a bit vector defined as an array of u64 */
+
+/* Number of bits in one sge_mask array element */
+#define BIT_VEC64_ELEM_SZ		64
+#define BIT_VEC64_ELEM_SHIFT		6
+#define BIT_VEC64_ELEM_MASK		((u64)BIT_VEC64_ELEM_SZ - 1)
+
+
+#define __BIT_VEC64_SET_BIT(el, bit) \
+	do { \
+		el = ((el) | ((u64)0x1 << (bit))); \
+	} while (0)
+
+#define __BIT_VEC64_CLEAR_BIT(el, bit) \
+	do { \
+		el = ((el) & (~((u64)0x1 << (bit)))); \
+	} while (0)
+
+
+#define BIT_VEC64_SET_BIT(vec64, idx) \
+	__BIT_VEC64_SET_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			   (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_CLEAR_BIT(vec64, idx) \
+	__BIT_VEC64_CLEAR_BIT((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT], \
+			     (idx) & BIT_VEC64_ELEM_MASK)
+
+#define BIT_VEC64_TEST_BIT(vec64, idx) \
+	(((vec64)[(idx) >> BIT_VEC64_ELEM_SHIFT] >> \
+	((idx) & BIT_VEC64_ELEM_MASK)) & 0x1)
+
+/* Creates a bitmask of all ones in the less significant bits.
+   idx - index of the most significant bit in the created mask */
+#define BIT_VEC64_ONES_MASK(idx) \
+		(((u64)0x1 << (((idx) & BIT_VEC64_ELEM_MASK) + 1)) - 1)
+#define BIT_VEC64_ELEM_ONE_MASK	((u64)(~0))
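+
+/* Usage sketch against a mask array such as fp->sge_mask[]:
+ *
+ *	BIT_VEC64_SET_BIT(fp->sge_mask, idx);
+ *	if (BIT_VEC64_TEST_BIT(fp->sge_mask, idx))
+ *		...
+ *
+ * e.g. idx = 70 lands in element 70 >> 6 = 1, bit 70 & 63 = 6.
+ */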
+
+/*******************************************************/
+
+
+
+/* Number of u64 elements in SGE mask array */
+#define RX_SGE_MASK_LEN			((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
+					 BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN_MASK		(RX_SGE_MASK_LEN - 1)
+#define NEXT_SGE_MASK_ELEM(el)		(((el) + 1) & RX_SGE_MASK_LEN_MASK)
+
+union host_hc_status_block {
+	/* pointer to fp status block e1x */
+	struct host_hc_status_block_e1x *e1x_sb;
+	/* pointer to fp status block e2 */
+	struct host_hc_status_block_e2  *e2_sb;
+};
+
+struct bnx2x_agg_info {
+	/*
+	 * The first aggregation buffer is an skb; the following ones are
+	 * pages.  We preallocate the skbs for each aggregation when we
+	 * open the interface, and replace the BD at the consumer with
+	 * this one when we receive the TPA_START CQE in order to keep
+	 * the Rx BD ring consistent.
+	 */
+	struct sw_rx_bd		first_buf;
+	u8			tpa_state;
+#define BNX2X_TPA_START			1
+#define BNX2X_TPA_STOP			2
+#define BNX2X_TPA_ERROR			3
+	u8			placement_offset;
+	u16			parsing_flags;
+	u16			vlan_tag;
+	u16			len_on_bd;
+};
+
+#define Q_STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
+
+struct bnx2x_fp_txdata {
+
+	struct sw_tx_bd		*tx_buf_ring;
+
+	union eth_tx_bd_types	*tx_desc_ring;
+	dma_addr_t		tx_desc_mapping;
+
+	u32			cid;
+
+	union db_prod		tx_db;
+
+	u16			tx_pkt_prod;
+	u16			tx_pkt_cons;
+	u16			tx_bd_prod;
+	u16			tx_bd_cons;
+
+	unsigned long		tx_pkt;
+
+	__le16			*tx_cons_sb;
+
+	int			txq_index;
+};
+
+struct bnx2x_fastpath {
+	struct bnx2x		*bp; /* parent */
+
+#define BNX2X_NAPI_WEIGHT       128
+	struct napi_struct	napi;
+	union host_hc_status_block	status_blk;
+	/* chip independent shortcuts into sb structure */
+	__le16			*sb_index_values;
+	__le16			*sb_running_index;
+	/* chip independent shortcut into rx_prods_offset memory */
+	u32			ustorm_rx_prods_offset;
+
+	u32			rx_buf_size;
+
+	dma_addr_t		status_blk_mapping;
+
+	u8			max_cos; /* actual number of active tx coses */
+	struct bnx2x_fp_txdata	txdata[BNX2X_MULTI_TX_COS];
+
+	struct sw_rx_bd		*rx_buf_ring;	/* BDs mappings ring */
+	struct sw_rx_page	*rx_page_ring;	/* SGE pages mappings ring */
+
+	struct eth_rx_bd	*rx_desc_ring;
+	dma_addr_t		rx_desc_mapping;
+
+	union eth_rx_cqe	*rx_comp_ring;
+	dma_addr_t		rx_comp_mapping;
+
+	/* SGE ring */
+	struct eth_rx_sge	*rx_sge_ring;
+	dma_addr_t		rx_sge_mapping;
+
+	u64			sge_mask[RX_SGE_MASK_LEN];
+
+	u32			cid;
+
+	__le16			fp_hc_idx;
+
+	u8			index;		/* number in fp array */
+	u8			cl_id;		/* eth client id */
+	u8			cl_qzone_id;
+	u8			fw_sb_id;	/* status block number in FW */
+	u8			igu_sb_id;	/* status block number in HW */
+
+	u16			rx_bd_prod;
+	u16			rx_bd_cons;
+	u16			rx_comp_prod;
+	u16			rx_comp_cons;
+	u16			rx_sge_prod;
+	/* The last maximal completed SGE */
+	u16			last_max_sge;
+	__le16			*rx_cons_sb;
+	unsigned long		rx_pkt,
+				rx_calls;
+
+	/* TPA related */
+	struct bnx2x_agg_info	tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2];
+	u8			disable_tpa;
+#ifdef BNX2X_STOP_ON_ERROR
+	u64			tpa_queue_used;
+#endif
+
+	struct tstorm_per_queue_stats old_tclient;
+	struct ustorm_per_queue_stats old_uclient;
+	struct xstorm_per_queue_stats old_xclient;
+	struct bnx2x_eth_q_stats eth_q_stats;
+
+	/* The size is calculated using the following:
+	     size of the name field in the netdev structure +
+	     4 ('-Xx-' string) +
+	     4 (for the digits and to make it DWORD aligned) */
+#define FP_NAME_SIZE		(sizeof(((struct net_device *)0)->name) + 8)
+	char			name[FP_NAME_SIZE];
+
+	/* MACs object */
+	struct bnx2x_vlan_mac_obj mac_obj;
+
+	/* Queue State object */
+	struct bnx2x_queue_sp_obj q_obj;
+
+};
+
+#define bnx2x_fp(bp, nr, var)		(bp->fp[nr].var)
+
+/* Use 2500 as a mini-jumbo MTU for FCoE */
+#define BNX2X_FCOE_MINI_JUMBO_MTU	2500
+
+/* FCoE L2 `fastpath' entry is right after the eth entries */
+#define FCOE_IDX			BNX2X_NUM_ETH_QUEUES(bp)
+#define bnx2x_fcoe_fp(bp)		(&bp->fp[FCOE_IDX])
+#define bnx2x_fcoe(bp, var)		(bnx2x_fcoe_fp(bp)->var)
+#define bnx2x_fcoe_tx(bp, var)		(bnx2x_fcoe_fp(bp)-> \
+						txdata[FIRST_TX_COS_INDEX].var)
+
+
+#define IS_ETH_FP(fp)			(fp->index < \
+					 BNX2X_NUM_ETH_QUEUES(fp->bp))
+#ifdef BCM_CNIC
+#define IS_FCOE_FP(fp)			(fp->index == FCOE_IDX)
+#define IS_FCOE_IDX(idx)		((idx) == FCOE_IDX)
+#else
+#define IS_FCOE_FP(fp)		false
+#define IS_FCOE_IDX(idx)	false
+#endif
+
+
+/* MC hsi */
+#define MAX_FETCH_BD		13	/* HW max BDs per packet */
+#define RX_COPY_THRESH		92
+
+#define NUM_TX_RINGS		16
+#define TX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_tx_bd_types))
+#define MAX_TX_DESC_CNT		(TX_DESC_CNT - 1)
+#define NUM_TX_BD		(TX_DESC_CNT * NUM_TX_RINGS)
+#define MAX_TX_BD		(NUM_TX_BD - 1)
+#define MAX_TX_AVAIL		(MAX_TX_DESC_CNT * NUM_TX_RINGS - 2)
+#define NEXT_TX_IDX(x)		((((x) & MAX_TX_DESC_CNT) == \
+				  (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+#define TX_BD(x)		((x) & MAX_TX_BD)
+#define TX_BD_POFF(x)		((x) & MAX_TX_DESC_CNT)
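+
+/* With 4K BCM pages and a 16-byte union eth_tx_bd_types this means
+ * TX_DESC_CNT = 256 and MAX_TX_DESC_CNT = 255: the last BD of each
+ * page points to the next page, so NEXT_TX_IDX(254) skips over it
+ * to 256.
+ */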
+
+/* The RX BD ring is special, each bd is 8 bytes but the last one is 16 */
+#define NUM_RX_RINGS		8
+#define RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define MAX_RX_DESC_CNT		(RX_DESC_CNT - 2)
+#define RX_DESC_MASK		(RX_DESC_CNT - 1)
+#define NUM_RX_BD		(RX_DESC_CNT * NUM_RX_RINGS)
+#define MAX_RX_BD		(NUM_RX_BD - 1)
+#define MAX_RX_AVAIL		(MAX_RX_DESC_CNT * NUM_RX_RINGS - 2)
+#define MIN_RX_AVAIL		128
+
+#define MIN_RX_SIZE_TPA_HW	(CHIP_IS_E1(bp) ? \
+					ETH_MIN_RX_CQES_WITH_TPA_E1 : \
+					ETH_MIN_RX_CQES_WITH_TPA_E1H_E2)
+#define MIN_RX_SIZE_NONTPA_HW   ETH_MIN_RX_CQES_WITHOUT_TPA
+#define MIN_RX_SIZE_TPA		(max_t(u32, MIN_RX_SIZE_TPA_HW, MIN_RX_AVAIL))
+#define MIN_RX_SIZE_NONTPA	(max_t(u32, MIN_RX_SIZE_NONTPA_HW,\
+								MIN_RX_AVAIL))
+
+#define NEXT_RX_IDX(x)		((((x) & RX_DESC_MASK) == \
+				  (MAX_RX_DESC_CNT - 1)) ? (x) + 3 : (x) + 1)
+#define RX_BD(x)		((x) & MAX_RX_BD)
+
+/*
+ * As long as CQE is X times bigger than BD entry we have to allocate X times
+ * more pages for CQ ring in order to keep it balanced with BD ring
+ */
+#define CQE_BD_REL	(sizeof(union eth_rx_cqe) / sizeof(struct eth_rx_bd))
+#define NUM_RCQ_RINGS		(NUM_RX_RINGS * CQE_BD_REL)
+#define RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define MAX_RCQ_DESC_CNT	(RCQ_DESC_CNT - 1)
+#define NUM_RCQ_BD		(RCQ_DESC_CNT * NUM_RCQ_RINGS)
+#define MAX_RCQ_BD		(NUM_RCQ_BD - 1)
+#define MAX_RCQ_AVAIL		(MAX_RCQ_DESC_CNT * NUM_RCQ_RINGS - 2)
+#define NEXT_RCQ_IDX(x)		((((x) & MAX_RCQ_DESC_CNT) == \
+				  (MAX_RCQ_DESC_CNT - 1)) ? (x) + 2 : (x) + 1)
+#define RCQ_BD(x)		((x) & MAX_RCQ_BD)
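+
+/* e.g. with a 32-byte union eth_rx_cqe and an 8-byte struct eth_rx_bd,
+ * CQE_BD_REL = 4, so the 8 RX BD pages are backed by
+ * NUM_RCQ_RINGS = 32 RCQ pages.
+ */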
+
+
+/* This is needed for determining last_max */
+#define SUB_S16(a, b)		(s16)((s16)(a) - (s16)(b))
+#define SUB_S32(a, b)		(s32)((s32)(a) - (s32)(b))
+
+
+#define BNX2X_SWCID_SHIFT	17
+#define BNX2X_SWCID_MASK	((0x1 << BNX2X_SWCID_SHIFT) - 1)
+
+/* used on a CID received from the HW */
+#define SW_CID(x)			(le32_to_cpu(x) & BNX2X_SWCID_MASK)
+#define CQE_CMD(x)			(le32_to_cpu(x) >> \
+					COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT)
+
+#define BD_UNMAP_ADDR(bd)		HILO_U64(le32_to_cpu((bd)->addr_hi), \
+						 le32_to_cpu((bd)->addr_lo))
+#define BD_UNMAP_LEN(bd)		(le16_to_cpu((bd)->nbytes))
+
+#define BNX2X_DB_MIN_SHIFT		3	/* 8 bytes */
+#define BNX2X_DB_SHIFT			7	/* 128 bytes*/
+#if (BNX2X_DB_SHIFT < BNX2X_DB_MIN_SHIFT)
+#error "Min DB doorbell stride is 8"
+#endif
+#define DPM_TRIGER_TYPE			0x40
+#define DOORBELL(bp, cid, val) \
+	do { \
+		writel((u32)(val), bp->doorbells + (bp->db_size * (cid)) + \
+		       DPM_TRIGER_TYPE); \
+	} while (0)
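+
+/* Usage sketch (as in the xmit path): ring the TX doorbell of a
+ * connection after updating the producer in tx_db:
+ *
+ *	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+ */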
+
+
+/* TX CSUM helpers */
+#define SKB_CS_OFF(skb)		(offsetof(struct tcphdr, check) - \
+				 skb->csum_offset)
+#define SKB_CS(skb)		(*(u16 *)(skb_transport_header(skb) + \
+					  skb->csum_offset))
+
+#define pbd_tcp_flags(skb)	(ntohl(tcp_flag_word(tcp_hdr(skb)))>>16 & 0xff)
+
+#define XMIT_PLAIN			0
+#define XMIT_CSUM_V4			0x1
+#define XMIT_CSUM_V6			0x2
+#define XMIT_CSUM_TCP			0x4
+#define XMIT_GSO_V4			0x8
+#define XMIT_GSO_V6			0x10
+
+#define XMIT_CSUM			(XMIT_CSUM_V4 | XMIT_CSUM_V6)
+#define XMIT_GSO			(XMIT_GSO_V4 | XMIT_GSO_V6)
+
+
+/* stuff added to make the code fit 80Col */
+#define CQE_TYPE(cqe_fp_flags)	 ((cqe_fp_flags) & ETH_FAST_PATH_RX_CQE_TYPE)
+#define CQE_TYPE_START(cqe_type) ((cqe_type) == RX_ETH_CQE_TYPE_ETH_START_AGG)
+#define CQE_TYPE_STOP(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_STOP_AGG)
+#define CQE_TYPE_SLOW(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_RAMROD)
+#define CQE_TYPE_FAST(cqe_type)  ((cqe_type) == RX_ETH_CQE_TYPE_ETH_FASTPATH)
+
+#define ETH_RX_ERROR_FALGS		ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
+
+#define BNX2X_IP_CSUM_ERR(cqe) \
+			(!((cqe)->fast_path_cqe.status_flags & \
+			   ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
+			 ((cqe)->fast_path_cqe.type_error_flags & \
+			  ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
+
+#define BNX2X_L4_CSUM_ERR(cqe) \
+			(!((cqe)->fast_path_cqe.status_flags & \
+			   ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
+			 ((cqe)->fast_path_cqe.type_error_flags & \
+			  ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
+
+#define BNX2X_RX_CSUM_OK(cqe) \
+			(!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
+
+#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
+				(((le16_to_cpu(flags) & \
+				   PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
+				  PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT) \
+				 == PRS_FLAG_OVERETH_IPV4)
+#define BNX2X_RX_SUM_FIX(cqe) \
+	BNX2X_PRS_FLAG_OVERETH_IPV4(cqe->fast_path_cqe.pars_flags.flags)
+
+
+#define FP_USB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_u, func)
+#define FP_CSB_FUNC_OFF	\
+			offsetof(struct cstorm_status_block_c, func)
+
+#define HC_INDEX_TOE_RX_CQ_CONS		0 /* Formerly Ustorm TOE CQ index */
+					  /* (HC_INDEX_U_TOE_RX_CQ_CONS)  */
+#define HC_INDEX_ETH_RX_CQ_CONS		1 /* Formerly Ustorm ETH CQ index */
+					  /* (HC_INDEX_U_ETH_RX_CQ_CONS)  */
+#define HC_INDEX_ETH_RX_BD_CONS		2 /* Formerly Ustorm ETH BD index */
+					  /* (HC_INDEX_U_ETH_RX_BD_CONS)  */
+
+#define HC_INDEX_TOE_TX_CQ_CONS		4 /* Formerly Cstorm TOE CQ index   */
+					  /* (HC_INDEX_C_TOE_TX_CQ_CONS)    */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS0	5 /* Formerly Cstorm ETH CQ index   */
+					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS1	6 /* Formerly Cstorm ETH CQ index   */
+					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
+#define HC_INDEX_ETH_TX_CQ_CONS_COS2	7 /* Formerly Cstorm ETH CQ index   */
+					  /* (HC_INDEX_C_ETH_TX_CQ_CONS)    */
+
+#define HC_INDEX_ETH_FIRST_TX_CQ_CONS	HC_INDEX_ETH_TX_CQ_CONS_COS0
+
+
+#define BNX2X_RX_SB_INDEX \
+	(&fp->sb_index_values[HC_INDEX_ETH_RX_CQ_CONS])
+
+#define BNX2X_TX_SB_INDEX_BASE BNX2X_TX_SB_INDEX_COS0
+
+#define BNX2X_TX_SB_INDEX_COS0 \
+	(&fp->sb_index_values[HC_INDEX_ETH_TX_CQ_CONS_COS0])
+
+/* end of fast path */
+
+/* common */
+
+struct bnx2x_common {
+
+	u32			chip_id;
+/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+#define CHIP_ID(bp)			(bp->common.chip_id & 0xfffffff0)
+
+#define CHIP_NUM(bp)			(bp->common.chip_id >> 16)
+#define CHIP_NUM_57710			0x164e
+#define CHIP_NUM_57711			0x164f
+#define CHIP_NUM_57711E			0x1650
+#define CHIP_NUM_57712			0x1662
+#define CHIP_NUM_57712_MF		0x1663
+#define CHIP_NUM_57713			0x1651
+#define CHIP_NUM_57713E			0x1652
+#define CHIP_NUM_57800			0x168a
+#define CHIP_NUM_57800_MF		0x16a5
+#define CHIP_NUM_57810			0x168e
+#define CHIP_NUM_57810_MF		0x16ae
+#define CHIP_NUM_57840			0x168d
+#define CHIP_NUM_57840_MF		0x16ab
+#define CHIP_IS_E1(bp)			(CHIP_NUM(bp) == CHIP_NUM_57710)
+#define CHIP_IS_57711(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711)
+#define CHIP_IS_57711E(bp)		(CHIP_NUM(bp) == CHIP_NUM_57711E)
+#define CHIP_IS_57712(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712)
+#define CHIP_IS_57712_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57712_MF)
+#define CHIP_IS_57800(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800)
+#define CHIP_IS_57800_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57800_MF)
+#define CHIP_IS_57810(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810)
+#define CHIP_IS_57810_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57810_MF)
+#define CHIP_IS_57840(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840)
+#define CHIP_IS_57840_MF(bp)		(CHIP_NUM(bp) == CHIP_NUM_57840_MF)
+#define CHIP_IS_E1H(bp)			(CHIP_IS_57711(bp) || \
+					 CHIP_IS_57711E(bp))
+#define CHIP_IS_E2(bp)			(CHIP_IS_57712(bp) || \
+					 CHIP_IS_57712_MF(bp))
+#define CHIP_IS_E3(bp)			(CHIP_IS_57800(bp) || \
+					 CHIP_IS_57800_MF(bp) || \
+					 CHIP_IS_57810(bp) || \
+					 CHIP_IS_57810_MF(bp) || \
+					 CHIP_IS_57840(bp) || \
+					 CHIP_IS_57840_MF(bp))
+#define CHIP_IS_E1x(bp)			(CHIP_IS_E1((bp)) || CHIP_IS_E1H((bp)))
+#define USES_WARPCORE(bp)		(CHIP_IS_E3(bp))
+#define IS_E1H_OFFSET			(!CHIP_IS_E1(bp))
+
+#define CHIP_REV_SHIFT			12
+#define CHIP_REV_MASK			(0xF << CHIP_REV_SHIFT)
+#define CHIP_REV_VAL(bp)		(bp->common.chip_id & CHIP_REV_MASK)
+#define CHIP_REV_Ax			(0x0 << CHIP_REV_SHIFT)
+#define CHIP_REV_Bx			(0x1 << CHIP_REV_SHIFT)
+/* assume maximum 5 revisions */
+#define CHIP_REV_IS_SLOW(bp)		(CHIP_REV_VAL(bp) > 0x00005000)
+/* Emul versions are A=>0xe, B=>0xc, C=>0xa, D=>8, E=>6 */
+#define CHIP_REV_IS_EMUL(bp)		((CHIP_REV_IS_SLOW(bp)) && \
+					 !(CHIP_REV_VAL(bp) & 0x00001000))
+/* FPGA versions are A=>0xf, B=>0xd, C=>0xb, D=>9, E=>7 */
+#define CHIP_REV_IS_FPGA(bp)		((CHIP_REV_IS_SLOW(bp)) && \
+					 (CHIP_REV_VAL(bp) & 0x00001000))
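+
+/* e.g. a rev field of 0xe000 is slow with bit 0x1000 clear, so it
+ * decodes as emulation rev A; 0xd000 is slow with bit 0x1000 set,
+ * i.e. FPGA rev B.
+ */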
+
+#define CHIP_TIME(bp)			((CHIP_REV_IS_EMUL(bp)) ? 2000 : \
+					((CHIP_REV_IS_FPGA(bp)) ? 200 : 1))
+
+#define CHIP_METAL(bp)			(bp->common.chip_id & 0x00000ff0)
+#define CHIP_BOND_ID(bp)		(bp->common.chip_id & 0x0000000f)
+#define CHIP_REV_SIM(bp)		(((CHIP_REV_MASK - CHIP_REV_VAL(bp)) >>\
+					   (CHIP_REV_SHIFT + 1)) \
+						<< CHIP_REV_SHIFT)
+#define CHIP_REV(bp)			(CHIP_REV_IS_SLOW(bp) ? \
+						CHIP_REV_SIM(bp) :\
+						CHIP_REV_VAL(bp))
+#define CHIP_IS_E3B0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Bx))
+#define CHIP_IS_E3A0(bp)		(CHIP_IS_E3(bp) && \
+					 (CHIP_REV(bp) == CHIP_REV_Ax))
+
+	int			flash_size;
+#define BNX2X_NVRAM_1MB_SIZE			0x20000	/* 1M bit in bytes */
+#define BNX2X_NVRAM_TIMEOUT_COUNT		30000
+#define BNX2X_NVRAM_PAGE_SIZE			256
+
+	u32			shmem_base;
+	u32			shmem2_base;
+	u32			mf_cfg_base;
+	u32			mf2_cfg_base;
+
+	u32			hw_config;
+
+	u32			bc_ver;
+
+	u8			int_block;
+#define INT_BLOCK_HC			0
+#define INT_BLOCK_IGU			1
+#define INT_BLOCK_MODE_NORMAL		0
+#define INT_BLOCK_MODE_BW_COMP		2
+#define CHIP_INT_MODE_IS_NBC(bp)		\
+			(!CHIP_IS_E1x(bp) &&	\
+			!((bp)->common.int_block & INT_BLOCK_MODE_BW_COMP))
+#define CHIP_INT_MODE_IS_BC(bp) (!CHIP_INT_MODE_IS_NBC(bp))
+
+	u8			chip_port_mode;
+#define CHIP_4_PORT_MODE			0x0
+#define CHIP_2_PORT_MODE			0x1
+#define CHIP_PORT_MODE_NONE			0x2
+#define CHIP_MODE(bp)			(bp->common.chip_port_mode)
+#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+};
+
+/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
+#define BNX2X_IGU_STAS_MSG_VF_CNT 64
+#define BNX2X_IGU_STAS_MSG_PF_CNT 4
+
+/* end of common */
+
+/* port */
+
+struct bnx2x_port {
+	u32			pmf;
+
+	u32			link_config[LINK_CONFIG_SIZE];
+
+	u32			supported[LINK_CONFIG_SIZE];
+/* link settings - missing defines */
+#define SUPPORTED_2500baseX_Full	(1 << 15)
+
+	u32			advertising[LINK_CONFIG_SIZE];
+/* link settings - missing defines */
+#define ADVERTISED_2500baseX_Full	(1 << 15)
+
+	u32			phy_addr;
+
+	/* used to synchronize phy accesses */
+	struct mutex		phy_mutex;
+	int			need_hw_lock;
+
+	u32			port_stx;
+
+	struct nig_stats	old_nig_stats;
+};
+
+/* end of port */
+
+#define STATS_OFFSET32(stat_name) \
+			(offsetof(struct bnx2x_eth_stats, stat_name) / 4)
+
+/* slow path */
+
+/* slow path work-queue */
+extern struct workqueue_struct *bnx2x_wq;
+
+#define BNX2X_MAX_NUM_OF_VFS	64
+#define BNX2X_VF_ID_INVALID	0xFF
+
+/*
+ * The total number of L2 queues, MSIX vectors and HW contexts (CIDs) is
+ * controlled by the number of fast-path status blocks supported by the
+ * device (HW/FW). Each fast-path status block (FP-SB), aka non-default
+ * status block, represents an independent interrupt context that can
+ * serve a regular L2 networking queue. However, special L2 queues such
+ * as the FCoE queue do not require a FP-SB, and other components like
+ * the CNIC may consume FP-SBs, reducing the number of possible L2
+ * queues.
+ *
+ * If the maximum number of FP-SBs available is X then:
+ * a. If CNIC is supported it consumes 1 FP-SB, thus the max number of
+ *    regular L2 queues is Y = X - 1.
+ * b. In MF mode the actual number of L2 queues is Y = (X - 1)/MF_factor.
+ * c. If the FCoE L2 queue is supported, the actual number of L2 queues
+ *    is Y + 1.
+ * d. The number of irqs (MSIX vectors) is either Y + 1 (one extra for
+ *    slow-path interrupts) or Y + 2 if CNIC is supported (one
+ *    additional FP interrupt context for the CNIC).
+ * e. The number of HW contexts (CID count) is always X, or X + 1 if the
+ *    FCoE L2 queue is supported. The cid for the FCoE L2 queue is
+ *    always X.
+ */
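+
+/* For example, on an E1x device (X = FP_SB_MAX_E1x = 16) with CNIC
+ * enabled in SF mode: Y = 15 regular L2 queues, 16 queues including
+ * FCoE, Y + 2 = 17 MSIX vectors, X + 1 = 17 CIDs, and the FCoE L2
+ * queue uses cid 16.
+ */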
+
+/* fast-path interrupt contexts E1x */
+#define FP_SB_MAX_E1x		16
+/* fast-path interrupt contexts E2 */
+#define FP_SB_MAX_E2		HC_SB_MAX_SB_E2
+
+union cdu_context {
+	struct eth_context eth;
+	char pad[1024];
+};
+
+/* CDU host DB constants */
+#define CDU_ILT_PAGE_SZ_HW	3
+#define CDU_ILT_PAGE_SZ		(8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */
+#define ILT_PAGE_CIDS		(CDU_ILT_PAGE_SZ / sizeof(union cdu_context))
+
+#ifdef BCM_CNIC
+#define CNIC_ISCSI_CID_MAX	256
+#define CNIC_FCOE_CID_MAX	2048
+#define CNIC_CID_MAX		(CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
+#define CNIC_ILT_LINES		DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
+#endif
+
+#define QM_ILT_PAGE_SZ_HW	0
+#define QM_ILT_PAGE_SZ		(4096 << QM_ILT_PAGE_SZ_HW) /* 4K */
+#define QM_CID_ROUND		1024
+
+#ifdef BCM_CNIC
+/* TM (timers) host DB constants */
+#define TM_ILT_PAGE_SZ_HW	0
+#define TM_ILT_PAGE_SZ		(4096 << TM_ILT_PAGE_SZ_HW) /* 4K */
+/* #define TM_CONN_NUM		(CNIC_STARTING_CID+CNIC_ISCSI_CXT_MAX) */
+#define TM_CONN_NUM		1024
+#define TM_ILT_SZ		(8 * TM_CONN_NUM)
+#define TM_ILT_LINES		DIV_ROUND_UP(TM_ILT_SZ, TM_ILT_PAGE_SZ)
+
+/* SRC (Searcher) host DB constants */
+#define SRC_ILT_PAGE_SZ_HW	0
+#define SRC_ILT_PAGE_SZ		(4096 << SRC_ILT_PAGE_SZ_HW) /* 4K */
+#define SRC_HASH_BITS		10
+#define SRC_CONN_NUM		(1 << SRC_HASH_BITS) /* 1024 */
+#define SRC_ILT_SZ		(sizeof(struct src_ent) * SRC_CONN_NUM)
+#define SRC_T2_SZ		SRC_ILT_SZ
+#define SRC_ILT_LINES		DIV_ROUND_UP(SRC_ILT_SZ, SRC_ILT_PAGE_SZ)
+
+#endif
+
+#define MAX_DMAE_C		8
+
+/* DMA memory not used in fastpath */
+struct bnx2x_slowpath {
+	union {
+		struct mac_configuration_cmd		e1x;
+		struct eth_classify_rules_ramrod_data	e2;
+	} mac_rdata;
+
+
+	union {
+		struct tstorm_eth_mac_filter_config	e1x;
+		struct eth_filter_rules_ramrod_data	e2;
+	} rx_mode_rdata;
+
+	union {
+		struct mac_configuration_cmd		e1;
+		struct eth_multicast_rules_ramrod_data  e2;
+	} mcast_rdata;
+
+	struct eth_rss_update_ramrod_data	rss_rdata;
+
+	/* Queue State related ramrods are always sent under rtnl_lock */
+	union {
+		struct client_init_ramrod_data  init_data;
+		struct client_update_ramrod_data update_data;
+	} q_rdata;
+
+	union {
+		struct function_start_data	func_start;
+		/* pfc configuration for DCBX ramrod */
+		struct flow_control_configuration pfc_config;
+	} func_rdata;
+
+	/* used by dmae command executer */
+	struct dmae_command		dmae[MAX_DMAE_C];
+
+	u32				stats_comp;
+	union mac_stats			mac_stats;
+	struct nig_stats		nig_stats;
+	struct host_port_stats		port_stats;
+	struct host_func_stats		func_stats;
+	struct host_func_stats		func_stats_base;
+
+	u32				wb_comp;
+	u32				wb_data[4];
+};
+
+#define bnx2x_sp(bp, var)		(&bp->slowpath->var)
+#define bnx2x_sp_mapping(bp, var) \
+		(bp->slowpath_mapping + offsetof(struct bnx2x_slowpath, var))
+
+
+/* attn group wiring */
+#define MAX_DYNAMIC_ATTN_GRPS		8
+
+struct attn_route {
+	u32 sig[5];
+};
+
+struct iro {
+	u32 base;
+	u16 m1;
+	u16 m2;
+	u16 m3;
+	u16 size;
+};
+
+struct hw_context {
+	union cdu_context *vcxt;
+	dma_addr_t cxt_mapping;
+	size_t size;
+};
+
+/* forward */
+struct bnx2x_ilt;
+
+
+enum bnx2x_recovery_state {
+	BNX2X_RECOVERY_DONE,
+	BNX2X_RECOVERY_INIT,
+	BNX2X_RECOVERY_WAIT,
+	BNX2X_RECOVERY_FAILED
+};
+
+/*
+ * Event queue (EQ or event ring) MC hsi
+ * NUM_EQ_PAGES and EQ_DESC_CNT_PAGE must be power of 2
+ */
+#define NUM_EQ_PAGES		1
+#define EQ_DESC_CNT_PAGE	(BCM_PAGE_SIZE / sizeof(union event_ring_elem))
+#define EQ_DESC_MAX_PAGE	(EQ_DESC_CNT_PAGE - 1)
+#define NUM_EQ_DESC		(EQ_DESC_CNT_PAGE * NUM_EQ_PAGES)
+#define EQ_DESC_MASK		(NUM_EQ_DESC - 1)
+#define MAX_EQ_AVAIL		(EQ_DESC_MAX_PAGE * NUM_EQ_PAGES - 2)
+
+/* depends on EQ_DESC_CNT_PAGE being a power of 2 */
+#define NEXT_EQ_IDX(x)		((((x) & EQ_DESC_MAX_PAGE) == \
+				  (EQ_DESC_MAX_PAGE - 1)) ? (x) + 2 : (x) + 1)
+
+/* depends on the above and on NUM_EQ_PAGES being a power of 2 */
+#define EQ_DESC(x)		((x) & EQ_DESC_MASK)
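+
+/* Illustrative: assuming a 4K BCM_PAGE_SIZE and a 32-byte
+ * union event_ring_elem, EQ_DESC_CNT_PAGE == 128, so NEXT_EQ_IDX(126)
+ * returns 128 (skipping the "next page" element at index 127) and
+ * EQ_DESC(128) wraps back to 0.
+ */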
+
+#define BNX2X_EQ_INDEX \
+	(&bp->def_status_blk->sp_sb.\
+	index_values[HC_SP_INDEX_EQ_CONS])
+
+/* This is the data that will be used to create a link report message.
+ * We will keep the data used for the last link report in order
+ * to prevent reporting the same link parameters twice.
+ */
+struct bnx2x_link_report_data {
+	u16 line_speed;			/* Effective line speed */
+	unsigned long link_report_flags;/* BNX2X_LINK_REPORT_XXX flags */
+};
+
+enum {
+	BNX2X_LINK_REPORT_FD,		/* Full DUPLEX */
+	BNX2X_LINK_REPORT_LINK_DOWN,
+	BNX2X_LINK_REPORT_RX_FC_ON,
+	BNX2X_LINK_REPORT_TX_FC_ON,
+};
+
+enum {
+	BNX2X_PORT_QUERY_IDX,
+	BNX2X_PF_QUERY_IDX,
+	BNX2X_FIRST_QUEUE_QUERY_IDX,
+};
+
+struct bnx2x_fw_stats_req {
+	struct stats_query_header hdr;
+	struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+struct bnx2x_fw_stats_data {
+	struct stats_counter	storm_counters;
+	struct per_port_stats	port;
+	struct per_pf_stats	pf;
+	struct per_queue_stats  queue_stats[1];
+};
+
+/* Public slow path states */
+enum {
+	BNX2X_SP_RTNL_SETUP_TC,
+	BNX2X_SP_RTNL_TX_TIMEOUT,
+};
+
+
+struct bnx2x {
+	/* Fields used in the tx and intr/napi performance paths
+	 * are grouped together in the beginning of the structure
+	 */
+	struct bnx2x_fastpath	*fp;
+	void __iomem		*regview;
+	void __iomem		*doorbells;
+	u16			db_size;
+
+	u8			pf_num;	/* absolute PF number */
+	u8			pfid;	/* per-path PF number */
+	int			base_fw_ndsb;
+#define BP_PATH(bp)			(CHIP_IS_E1x(bp) ? 0 : (bp->pf_num & 1))
+#define BP_PORT(bp)			(bp->pfid & 1)
+#define BP_FUNC(bp)			(bp->pfid)
+#define BP_ABS_FUNC(bp)			(bp->pf_num)
+#define BP_E1HVN(bp)			(bp->pfid >> 1)
+#define BP_VN(bp)			(BP_E1HVN(bp)) /*remove when approved*/
+#define BP_L_ID(bp)			(BP_E1HVN(bp) << 2)
+#define BP_FW_MB_IDX(bp)		(BP_PORT(bp) +\
+	  BP_VN(bp) * ((CHIP_IS_E1x(bp) || (CHIP_MODE_IS_4_PORT(bp))) ? 2  : 1))
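+/* Illustrative: on an E1H device with pfid == 5: BP_PORT == 1,
+ * BP_FUNC == 5, BP_E1HVN == 2 and BP_L_ID == 8.
+ */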
+
+	struct net_device	*dev;
+	struct pci_dev		*pdev;
+
+	const struct iro	*iro_arr;
+#define IRO (bp->iro_arr)
+
+	enum bnx2x_recovery_state recovery_state;
+	int			is_leader;
+	struct msix_entry	*msix_table;
+
+	int			tx_ring_size;
+
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD		(ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE		60
+#define ETH_MAX_PACKET_SIZE		1500
+#define ETH_MAX_JUMBO_PACKET_SIZE	9600
+
+	/* Max supported alignment is 256 (8 shift) */
+#define BNX2X_RX_ALIGN_SHIFT		((L1_CACHE_SHIFT < 8) ? \
+					 L1_CACHE_SHIFT : 8)
+	/* FW uses 2-cache-line alignment for the packet start and size */
+#define BNX2X_FW_RX_ALIGN		(2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_PXP_DRAM_ALIGN		(BNX2X_RX_ALIGN_SHIFT - 5)
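+/* Illustrative: with 64-byte cache lines (L1_CACHE_SHIFT == 6),
+ * BNX2X_RX_ALIGN_SHIFT == 6 and BNX2X_FW_RX_ALIGN == 2 << 6 == 128.
+ */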
+
+	struct host_sp_status_block *def_status_blk;
+#define DEF_SB_IGU_ID			16
+#define DEF_SB_ID			HC_SP_SB_ID
+	__le16			def_idx;
+	__le16			def_att_idx;
+	u32			attn_state;
+	struct attn_route	attn_group[MAX_DYNAMIC_ATTN_GRPS];
+
+	/* slow path ring */
+	struct eth_spe		*spq;
+	dma_addr_t		spq_mapping;
+	u16			spq_prod_idx;
+	struct eth_spe		*spq_prod_bd;
+	struct eth_spe		*spq_last_bd;
+	__le16			*dsb_sp_prod;
+	atomic_t		cq_spq_left; /* ETH_XXX ramrods credit */
+	/* used to synchronize spq accesses */
+	spinlock_t		spq_lock;
+
+	/* event queue */
+	union event_ring_elem	*eq_ring;
+	dma_addr_t		eq_mapping;
+	u16			eq_prod;
+	u16			eq_cons;
+	__le16			*eq_cons_sb;
+	atomic_t		eq_spq_left; /* COMMON_XXX ramrods credit */
+
+	/* Counter for marking that there is a STAT_QUERY ramrod pending */
+	u16			stats_pending;
+	/*  Counter for completed statistics ramrods */
+	u16			stats_comp;
+
+	/* End of fields used in the performance code paths */
+
+	int			panic;
+	int			msg_enable;
+
+	u32			flags;
+#define PCIX_FLAG			(1 << 0)
+#define PCI_32BIT_FLAG			(1 << 1)
+#define ONE_PORT_FLAG			(1 << 2)
+#define NO_WOL_FLAG			(1 << 3)
+#define USING_DAC_FLAG			(1 << 4)
+#define USING_MSIX_FLAG			(1 << 5)
+#define USING_MSI_FLAG			(1 << 6)
+#define DISABLE_MSI_FLAG		(1 << 7)
+#define TPA_ENABLE_FLAG			(1 << 8)
+#define NO_MCP_FLAG			(1 << 9)
+
+#define BP_NOMCP(bp)			(bp->flags & NO_MCP_FLAG)
+#define MF_FUNC_DIS			(1 << 11)
+#define OWN_CNIC_IRQ			(1 << 12)
+#define NO_ISCSI_OOO_FLAG		(1 << 13)
+#define NO_ISCSI_FLAG			(1 << 14)
+#define NO_FCOE_FLAG			(1 << 15)
+
+#define NO_ISCSI(bp)		((bp)->flags & NO_ISCSI_FLAG)
+#define NO_ISCSI_OOO(bp)	((bp)->flags & NO_ISCSI_OOO_FLAG)
+#define NO_FCOE(bp)		((bp)->flags & NO_FCOE_FLAG)
+
+	int			pm_cap;
+	int			mrrs;
+
+	struct delayed_work	sp_task;
+	struct delayed_work	sp_rtnl_task;
+
+	struct delayed_work	period_task;
+	struct timer_list	timer;
+	int			current_interval;
+
+	u16			fw_seq;
+	u16			fw_drv_pulse_wr_seq;
+	u32			func_stx;
+
+	struct link_params	link_params;
+	struct link_vars	link_vars;
+	u32			link_cnt;
+	struct bnx2x_link_report_data last_reported_link;
+
+	struct mdio_if_info	mdio;
+
+	struct bnx2x_common	common;
+	struct bnx2x_port	port;
+
+	struct cmng_struct_per_port cmng;
+	u32			vn_weight_sum;
+	u32			mf_config[E1HVN_MAX];
+	u32			mf2_config[E2_FUNC_MAX];
+	u32			path_has_ovlan; /* E3 */
+	u16			mf_ov;
+	u8			mf_mode;
+#define IS_MF(bp)		(bp->mf_mode != 0)
+#define IS_MF_SI(bp)		(bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp)		(bp->mf_mode == MULTI_FUNCTION_SD)
+
+	u8			wol;
+
+	int			rx_ring_size;
+
+	u16			tx_quick_cons_trip_int;
+	u16			tx_quick_cons_trip;
+	u16			tx_ticks_int;
+	u16			tx_ticks;
+
+	u16			rx_quick_cons_trip_int;
+	u16			rx_quick_cons_trip;
+	u16			rx_ticks_int;
+	u16			rx_ticks;
+/* Maximal coalescing timeout in us */
+#define BNX2X_MAX_COALESCE_TOUT		(0xf0*12)
+
+	u32			lin_cnt;
+
+	u16			state;
+#define BNX2X_STATE_CLOSED		0
+#define BNX2X_STATE_OPENING_WAIT4_LOAD	0x1000
+#define BNX2X_STATE_OPENING_WAIT4_PORT	0x2000
+#define BNX2X_STATE_OPEN		0x3000
+#define BNX2X_STATE_CLOSING_WAIT4_HALT	0x4000
+#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
+
+#define BNX2X_STATE_DIAG		0xe000
+#define BNX2X_STATE_ERROR		0xf000
+
+	int			multi_mode;
+#define BNX2X_MAX_PRIORITY		8
+#define BNX2X_MAX_ENTRIES_PER_PRI	16
+#define BNX2X_MAX_COS			3
+#define BNX2X_MAX_TX_COS		2
+	int			num_queues;
+	int			disable_tpa;
+
+	u32			rx_mode;
+#define BNX2X_RX_MODE_NONE		0
+#define BNX2X_RX_MODE_NORMAL		1
+#define BNX2X_RX_MODE_ALLMULTI		2
+#define BNX2X_RX_MODE_PROMISC		3
+#define BNX2X_MAX_MULTICAST		64
+
+	u8			igu_dsb_id;
+	u8			igu_base_sb;
+	u8			igu_sb_cnt;
+	dma_addr_t		def_status_blk_mapping;
+
+	struct bnx2x_slowpath	*slowpath;
+	dma_addr_t		slowpath_mapping;
+
+	/* Total number of FW statistics requests */
+	u8			fw_stats_num;
+
+	/*
+	 * This is a memory buffer that will contain both statistics
+	 * ramrod request and data.
+	 */
+	void			*fw_stats;
+	dma_addr_t		fw_stats_mapping;
+
+	/*
+	 * FW statistics request shortcut (points at the
+	 * beginning of fw_stats buffer).
+	 */
+	struct bnx2x_fw_stats_req	*fw_stats_req;
+	dma_addr_t			fw_stats_req_mapping;
+	int				fw_stats_req_sz;
+
+	/*
+	 * FW statistics data shortcut (points at the beginning of
+	 * fw_stats buffer + fw_stats_req_sz).
+	 */
+	struct bnx2x_fw_stats_data	*fw_stats_data;
+	dma_addr_t			fw_stats_data_mapping;
+	int				fw_stats_data_sz;
+
+	struct hw_context	context;
+
+	struct bnx2x_ilt	*ilt;
+#define BP_ILT(bp)		((bp)->ilt)
+#define ILT_MAX_LINES		256
+/*
+ * Maximum supported number of RSS queues: number of IGU SBs minus one that goes
+ * to CNIC.
+ */
+#define BNX2X_MAX_RSS_COUNT(bp)	((bp)->igu_sb_cnt - CNIC_PRESENT)
+
+/*
+ * Maximum CID count that might be required by the bnx2x:
+ * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related)
+ */
+#define BNX2X_L2_CID_COUNT(bp)	(MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\
+					NON_ETH_CONTEXT_USE + CNIC_PRESENT)
+#define L2_ILT_LINES(bp)	(DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\
+					ILT_PAGE_CIDS))
+#define BNX2X_DB_SIZE(bp)	(BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT))
+
+	int			qm_cid_count;
+
+	int			dropless_fc;
+
+#ifdef BCM_CNIC
+	u32			cnic_flags;
+#define BNX2X_CNIC_FLAG_MAC_SET		1
+	void			*t2;
+	dma_addr_t		t2_mapping;
+	struct cnic_ops	__rcu	*cnic_ops;
+	void			*cnic_data;
+	u32			cnic_tag;
+	struct cnic_eth_dev	cnic_eth_dev;
+	union host_hc_status_block cnic_sb;
+	dma_addr_t		cnic_sb_mapping;
+	struct eth_spe		*cnic_kwq;
+	struct eth_spe		*cnic_kwq_prod;
+	struct eth_spe		*cnic_kwq_cons;
+	struct eth_spe		*cnic_kwq_last;
+	u16			cnic_kwq_pending;
+	u16			cnic_spq_pending;
+	u8			fip_mac[ETH_ALEN];
+	struct mutex		cnic_mutex;
+	struct bnx2x_vlan_mac_obj iscsi_l2_mac_obj;
+
+	/* Start index of the "special" (CNIC related) L2 clients */
+	u8				cnic_base_cl_id;
+#endif
+
+	int			dmae_ready;
+	/* used to synchronize dmae accesses */
+	spinlock_t		dmae_lock;
+
+	/* used to protect the FW mail box */
+	struct mutex		fw_mb_mutex;
+
+	/* used to synchronize stats collecting */
+	int			stats_state;
+
+	/* used to synchronize statistics handling between concurrent threads */
+	spinlock_t		stats_lock;
+
+	/* used by dmae command loader */
+	struct dmae_command	stats_dmae;
+	int			executer_idx;
+
+	u16			stats_counter;
+	struct bnx2x_eth_stats	eth_stats;
+
+	struct z_stream_s	*strm;
+	void			*gunzip_buf;
+	dma_addr_t		gunzip_mapping;
+	int			gunzip_outlen;
+#define FW_BUF_SIZE			0x8000
+#define GUNZIP_BUF(bp)			(bp->gunzip_buf)
+#define GUNZIP_PHYS(bp)			(bp->gunzip_mapping)
+#define GUNZIP_OUTLEN(bp)		(bp->gunzip_outlen)
+
+	struct raw_op		*init_ops;
+	/* Init blocks offsets inside init_ops */
+	u16			*init_ops_offsets;
+	/* Data blob - has 32 bit granularity */
+	u32			*init_data;
+	u32			init_mode_flags;
+#define INIT_MODE_FLAGS(bp)	(bp->init_mode_flags)
+	/* Zipped PRAM blobs - raw data */
+	const u8		*tsem_int_table_data;
+	const u8		*tsem_pram_data;
+	const u8		*usem_int_table_data;
+	const u8		*usem_pram_data;
+	const u8		*xsem_int_table_data;
+	const u8		*xsem_pram_data;
+	const u8		*csem_int_table_data;
+	const u8		*csem_pram_data;
+#define INIT_OPS(bp)			(bp->init_ops)
+#define INIT_OPS_OFFSETS(bp)		(bp->init_ops_offsets)
+#define INIT_DATA(bp)			(bp->init_data)
+#define INIT_TSEM_INT_TABLE_DATA(bp)	(bp->tsem_int_table_data)
+#define INIT_TSEM_PRAM_DATA(bp)		(bp->tsem_pram_data)
+#define INIT_USEM_INT_TABLE_DATA(bp)	(bp->usem_int_table_data)
+#define INIT_USEM_PRAM_DATA(bp)		(bp->usem_pram_data)
+#define INIT_XSEM_INT_TABLE_DATA(bp)	(bp->xsem_int_table_data)
+#define INIT_XSEM_PRAM_DATA(bp)		(bp->xsem_pram_data)
+#define INIT_CSEM_INT_TABLE_DATA(bp)	(bp->csem_int_table_data)
+#define INIT_CSEM_PRAM_DATA(bp)		(bp->csem_pram_data)
+
+#define PHY_FW_VER_LEN			20
+	char			fw_ver[32];
+	const struct firmware	*firmware;
+
+	/* DCB support on/off */
+	u16 dcb_state;
+#define BNX2X_DCB_STATE_OFF			0
+#define BNX2X_DCB_STATE_ON			1
+
+	/* DCBX engine mode */
+	int dcbx_enabled;
+#define BNX2X_DCBX_ENABLED_OFF			0
+#define BNX2X_DCBX_ENABLED_ON_NEG_OFF		1
+#define BNX2X_DCBX_ENABLED_ON_NEG_ON		2
+#define BNX2X_DCBX_ENABLED_INVALID		(-1)
+
+	bool dcbx_mode_uset;
+
+	struct bnx2x_config_dcbx_params		dcbx_config_params;
+	struct bnx2x_dcbx_port_params		dcbx_port_params;
+	int					dcb_version;
+
+	/* CAM credit pools */
+	struct bnx2x_credit_pool_obj		macs_pool;
+
+	/* RX_MODE object */
+	struct bnx2x_rx_mode_obj		rx_mode_obj;
+
+	/* MCAST object */
+	struct bnx2x_mcast_obj			mcast_obj;
+
+	/* RSS configuration object */
+	struct bnx2x_rss_config_obj		rss_conf_obj;
+
+	/* Function State controlling object */
+	struct bnx2x_func_sp_obj		func_obj;
+
+	unsigned long				sp_state;
+
+	/* operation indication for the sp_rtnl task */
+	unsigned long				sp_rtnl_state;
+
+	/* DCBX negotiation results */
+	struct dcbx_features			dcbx_local_feat;
+	u32					dcbx_error;
+
+#ifdef BCM_DCBNL
+	struct dcbx_features			dcbx_remote_feat;
+	u32					dcbx_remote_flags;
+#endif
+	u32					pending_max;
+
+	/* multiple tx classes of service */
+	u8					max_cos;
+
+	/* priority to cos mapping */
+	u8					prio_to_cos[8];
+};
+
+/* Tx queues may be fewer than or equal to Rx queues */
+extern int num_queues;
+#define BNX2X_NUM_QUEUES(bp)	(bp->num_queues)
+#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE)
+#define BNX2X_NUM_RX_QUEUES(bp)	BNX2X_NUM_QUEUES(bp)
+
+#define is_multi(bp)		(BNX2X_NUM_QUEUES(bp) > 1)
+
+#define BNX2X_MAX_QUEUES(bp)	BNX2X_MAX_RSS_COUNT(bp)
+/* #define is_eth_multi(bp)	(BNX2X_NUM_ETH_QUEUES(bp) > 1) */
+
+#define RSS_IPV4_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
+
+#define RSS_IPV4_TCP_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY
+
+#define RSS_IPV6_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY
+
+#define RSS_IPV6_TCP_CAP_MASK						\
+	TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY
+
+/* func init flags */
+#define FUNC_FLG_RSS		0x0001
+#define FUNC_FLG_STATS		0x0002
+/* removed  FUNC_FLG_UNMATCHED	0x0004 */
+#define FUNC_FLG_TPA		0x0008
+#define FUNC_FLG_SPQ		0x0010
+#define FUNC_FLG_LEADING	0x0020	/* PF only */
+
+
+struct bnx2x_func_init_params {
+	/* dma */
+	dma_addr_t	fw_stat_map;	/* valid iff FUNC_FLG_STATS */
+	dma_addr_t	spq_map;	/* valid iff FUNC_FLG_SPQ */
+
+	u16		func_flgs;
+	u16		func_id;	/* abs fid */
+	u16		pf_id;
+	u16		spq_prod;	/* valid iff FUNC_FLG_SPQ */
+};
+
+#define for_each_eth_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_nondefault_eth_queue(bp, var) \
+	for ((var) = 1; (var) < BNX2X_NUM_ETH_QUEUES(bp); (var)++)
+
+#define for_each_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
+/* Skip forwarding FP */
+#define for_each_rx_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_rx_queue(bp, var))	\
+			continue;		\
+		else
+
+/* Skip OOO FP */
+#define for_each_tx_queue(bp, var) \
+	for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_tx_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_nondefault_queue(bp, var) \
+	for ((var) = 1; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \
+		if (skip_queue(bp, var))	\
+			continue;		\
+		else
+
+#define for_each_cos_in_tx_queue(fp, var) \
+	for ((var) = 0; (var) < (fp)->max_cos; (var)++)
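+
+/* Usage sketch (illustrative): the driver walks its rings with these
+ * iterators, e.g.:
+ *	for_each_rx_queue(bp, i)
+ *		napi_enable(&bnx2x_fp(bp, i, napi));
+ * The rx/tx variants leave out the FCoE L2 queue when FCoE support
+ * is disabled.
+ */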
+
+/* skip rx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_rx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/* skip tx queue
+ * if FCOE l2 support is disabled and this is the fcoe L2 queue
+ */
+#define skip_tx_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+#define skip_queue(bp, idx)	(NO_FCOE(bp) && IS_FCOE_IDX(idx))
+
+/**
+ * bnx2x_set_mac_one - configure a single MAC address
+ *
+ * @bp:			driver handle
+ * @mac:		MAC to configure
+ * @obj:		MAC object handle
+ * @set:		if 'true' add a new MAC, otherwise - delete
+ * @mac_type:		the type of the MAC to configure (e.g. ETH, UC list)
+ * @ramrod_flags:	RAMROD_XXX flags (e.g. RAMROD_CONT, RAMROD_COMP_WAIT)
+ *
+ * Configures one MAC according to provided parameters or continues the
+ * execution of previously scheduled commands if RAMROD_CONT is set in
+ * ramrod_flags.
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if it has been successfully scheduled and a negative value if the requested
+ * operation has failed.
+ */
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags);
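+
+/* Usage sketch (illustrative): add an ETH MAC and wait for completion:
+ *	unsigned long ramrod_flags = 0;
+ *
+ *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ *	rc = bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj,
+ *			       true, BNX2X_ETH_MAC, &ramrod_flags);
+ */
+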
+/**
+ * bnx2x_del_all_macs - delete all MACs configured for the specific MAC object
+ *
+ * @bp:			driver handle
+ * @mac_obj:		MAC object handle
+ * @mac_type:		type of the MACs to clear (BNX2X_XXX_MAC)
+ * @wait_for_comp:	if 'true' block until completion
+ *
+ * Deletes all MACs of the specific type (e.g. ETH, UC list).
+ *
+ * Returns zero if the operation has completed successfully, a positive value
+ * if it has been successfully scheduled and a negative value if the requested
+ * operation has failed.
+ */
+int bnx2x_del_all_macs(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_obj *mac_obj,
+		       int mac_type, bool wait_for_comp);
+
+/* Init Function API  */
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p);
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port);
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode);
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port);
+void bnx2x_read_mf_cfg(struct bnx2x *bp);
+
+
+/* dmae */
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32);
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+		      u32 len32);
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx);
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type);
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode);
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+		      bool with_comp, u8 comp_type);
+
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp);
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+		  u32 data_hi, u32 data_lo, int cmd_type);
+void bnx2x_update_coalesce(struct bnx2x *bp);
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp);
+
+static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
+			   int wait)
+{
+	u32 val;
+
+	do {
+		val = REG_RD(bp, reg);
+		if (val == expected)
+			break;
+		ms -= wait;
+		msleep(wait);
+
+	} while (ms > 0);
+
+	return val;
+}
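+
+/* Usage sketch (illustrative): poll 'some_reg' every 5 ms for up to
+ * ~50 ms until it reads back 1:
+ *	val = reg_poll(bp, some_reg, 1, 50, 5);
+ */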
+
+#define BNX2X_ILT_ZALLOC(x, y, size) \
+	do { \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+		if (x) \
+			memset(x, 0, size); \
+	} while (0)
+
+#define BNX2X_ILT_FREE(x, y, size) \
+	do { \
+		if (x) { \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
+			x = NULL; \
+			y = 0; \
+		} \
+	} while (0)
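+
+/* Usage sketch (illustrative; 'line' standing for an ILT line entry):
+ *	BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, page_sz);
+ *	...
+ *	BNX2X_ILT_FREE(line->page, line->page_mapping, page_sz);
+ */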
+
+#define ILOG2(x)	(ilog2((x)))
+
+#define ILT_NUM_PAGE_ENTRIES	(3072)
+/* In 57710/11 we use the whole table since we have 8 functions.
+ * In 57712 we have only 4 functions, but use the same size per function,
+ * so only half of the table is in use.
+ */
+#define ILT_PER_FUNC		(ILT_NUM_PAGE_ENTRIES/8)
+
+#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
+/*
+ * The physical address is shifted right by 12 bits and has a
+ * valid bit (1) added at the 53rd bit;
+ * then, since this is a wide register(TM),
+ * we split it into two 32-bit writes.
+ */
+#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
+#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
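+
+/* Example (illustrative): for x = 0x12345000ULL,
+ * ONCHIP_ADDR1(x) == 0x00012345 and ONCHIP_ADDR2(x) == 0x00100000
+ * (just the valid bit, since the address is below 2^44).
+ */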
+
+/* load/unload mode */
+#define LOAD_NORMAL			0
+#define LOAD_OPEN			1
+#define LOAD_DIAG			2
+#define UNLOAD_NORMAL			0
+#define UNLOAD_CLOSE			1
+#define UNLOAD_RECOVERY			2
+
+
+/* DMAE command defines */
+#define DMAE_TIMEOUT			-1
+#define DMAE_PCI_ERROR			-2	/* E2 and onward */
+#define DMAE_NOT_RDY			-3
+#define DMAE_PCI_ERR_FLAG		0x80000000
+
+#define DMAE_SRC_PCI			0
+#define DMAE_SRC_GRC			1
+
+#define DMAE_DST_NONE			0
+#define DMAE_DST_PCI			1
+#define DMAE_DST_GRC			2
+
+#define DMAE_COMP_PCI			0
+#define DMAE_COMP_GRC			1
+
+/* E2 and onward - PCI error handling in the completion */
+
+#define DMAE_COMP_REGULAR		0
+#define DMAE_COM_SET_ERR		1
+
+#define DMAE_CMD_SRC_PCI		(DMAE_SRC_PCI << \
+						DMAE_COMMAND_SRC_SHIFT)
+#define DMAE_CMD_SRC_GRC		(DMAE_SRC_GRC << \
+						DMAE_COMMAND_SRC_SHIFT)
+
+#define DMAE_CMD_DST_PCI		(DMAE_DST_PCI << \
+						DMAE_COMMAND_DST_SHIFT)
+#define DMAE_CMD_DST_GRC		(DMAE_DST_GRC << \
+						DMAE_COMMAND_DST_SHIFT)
+
+#define DMAE_CMD_C_DST_PCI		(DMAE_COMP_PCI << \
+						DMAE_COMMAND_C_DST_SHIFT)
+#define DMAE_CMD_C_DST_GRC		(DMAE_COMP_GRC << \
+						DMAE_COMMAND_C_DST_SHIFT)
+
+#define DMAE_CMD_C_ENABLE		DMAE_COMMAND_C_TYPE_ENABLE
+
+#define DMAE_CMD_ENDIANITY_NO_SWAP	(0 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_SWAP	(1 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_DW_SWAP	(2 << DMAE_COMMAND_ENDIANITY_SHIFT)
+#define DMAE_CMD_ENDIANITY_B_DW_SWAP	(3 << DMAE_COMMAND_ENDIANITY_SHIFT)
+
+#define DMAE_CMD_PORT_0			0
+#define DMAE_CMD_PORT_1			DMAE_COMMAND_PORT
+
+#define DMAE_CMD_SRC_RESET		DMAE_COMMAND_SRC_RESET
+#define DMAE_CMD_DST_RESET		DMAE_COMMAND_DST_RESET
+#define DMAE_CMD_E1HVN_SHIFT		DMAE_COMMAND_E1HVN_SHIFT
+
+#define DMAE_SRC_PF			0
+#define DMAE_SRC_VF			1
+
+#define DMAE_DST_PF			0
+#define DMAE_DST_VF			1
+
+#define DMAE_C_SRC			0
+#define DMAE_C_DST			1
+
+#define DMAE_LEN32_RD_MAX		0x80
+#define DMAE_LEN32_WR_MAX(bp)		(CHIP_IS_E1(bp) ? 0x400 : 0x2000)
+
+#define DMAE_COMP_VAL			0x60d0d0ae /* E2 and on - upper bit
+							indicates error */
+
+#define MAX_DMAE_C_PER_PORT		8
+#define INIT_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+					 BP_E1HVN(bp))
+#define PMF_DMAE_C(bp)			(BP_PORT(bp) * MAX_DMAE_C_PER_PORT + \
+					 E1HVN_MAX)
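+
+/* Example (illustrative): on port 1, vn 2: INIT_DMAE_C == 1 * 8 + 2 == 10
+ * and, with E1HVN_MAX == 4, PMF_DMAE_C == 1 * 8 + 4 == 12.
+ */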
+
+/* PCIE link and speed */
+#define PCICFG_LINK_WIDTH		0x1f00000
+#define PCICFG_LINK_WIDTH_SHIFT		20
+#define PCICFG_LINK_SPEED		0xf0000
+#define PCICFG_LINK_SPEED_SHIFT		16
+
+
+#define BNX2X_NUM_TESTS			7
+
+#define BNX2X_PHY_LOOPBACK		0
+#define BNX2X_MAC_LOOPBACK		1
+#define BNX2X_PHY_LOOPBACK_FAILED	1
+#define BNX2X_MAC_LOOPBACK_FAILED	2
+#define BNX2X_LOOPBACK_FAILED		(BNX2X_MAC_LOOPBACK_FAILED | \
+					 BNX2X_PHY_LOOPBACK_FAILED)
+
+
+#define STROM_ASSERT_ARRAY_SIZE		50
+
+
+/* must be used on a CID before placing it on a HW ring */
+#define HW_CID(bp, x)			((BP_PORT(bp) << 23) | \
+					 (BP_E1HVN(bp) << BNX2X_SWCID_SHIFT) | \
+					 (x))
+
+#define SP_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_spe))
+#define MAX_SP_DESC_CNT			(SP_DESC_CNT - 1)
+
+
+#define BNX2X_BTR			4
+#define MAX_SPQ_PENDING			8
+
+/* CMNG constants, as derived from system spec calculations */
+/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
+#define DEF_MIN_RATE					100
+/* resolution of the rate shaping timer - 400 usec */
+#define RS_PERIODIC_TIMEOUT_USEC			400
+/* number of bytes in single QM arbitration cycle -
+ * coefficient for calculating the fairness timer */
+#define QM_ARB_BYTES					160000
+/* resolution of Min algorithm 1:100 */
+#define MIN_RES						100
+/* how many bytes above threshold for the minimal credit of Min algorithm*/
+#define MIN_ABOVE_THRESH				32768
+/* Fairness algorithm integration time coefficient -
+ * for calculating the actual Tfair */
+#define T_FAIR_COEF	((MIN_ABOVE_THRESH +  QM_ARB_BYTES) * 8 * MIN_RES)
+/* Memory of fairness algorithm . 2 cycles */
+#define FAIR_MEM					2
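+
+/* With the values above: T_FAIR_COEF == (32768 + 160000) * 8 * 100
+ * == 154214400.
+ */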
+
+
+#define ATTN_NIG_FOR_FUNC		(1L << 8)
+#define ATTN_SW_TIMER_4_FUNC		(1L << 9)
+#define GPIO_2_FUNC			(1L << 10)
+#define GPIO_3_FUNC			(1L << 11)
+#define GPIO_4_FUNC			(1L << 12)
+#define ATTN_GENERAL_ATTN_1		(1L << 13)
+#define ATTN_GENERAL_ATTN_2		(1L << 14)
+#define ATTN_GENERAL_ATTN_3		(1L << 15)
+#define ATTN_GENERAL_ATTN_4		(1L << 13)
+#define ATTN_GENERAL_ATTN_5		(1L << 14)
+#define ATTN_GENERAL_ATTN_6		(1L << 15)
+
+#define ATTN_HARD_WIRED_MASK		0xff00
+#define ATTENTION_ID			4
+
+
+/* stuff added to make the code fit 80Col */
+
+#define BNX2X_PMF_LINK_ASSERT \
+	GENERAL_ATTEN_OFFSET(LINK_SYNC_ATTENTION_BIT_FUNC_0 + BP_FUNC(bp))
+
+#define BNX2X_MC_ASSERT_BITS \
+	(GENERAL_ATTEN_OFFSET(TSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(USTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(CSTORM_FATAL_ASSERT_ATTENTION_BIT) | \
+	 GENERAL_ATTEN_OFFSET(XSTORM_FATAL_ASSERT_ATTENTION_BIT))
+
+#define BNX2X_MCP_ASSERT \
+	GENERAL_ATTEN_OFFSET(MCP_FATAL_ASSERT_ATTENTION_BIT)
+
+#define BNX2X_GRC_TIMEOUT	GENERAL_ATTEN_OFFSET(LATCHED_ATTN_TIMEOUT_GRC)
+#define BNX2X_GRC_RSV		(GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCR) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCT) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCN) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCU) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RBCP) | \
+				 GENERAL_ATTEN_OFFSET(LATCHED_ATTN_RSVD_GRC))
+
+#define HW_INTERRUT_ASSERT_SET_0 \
+				(AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_0	(AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_1 \
+				(AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_1	(AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR |\
+			     AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR)
+#define HW_INTERRUT_ASSERT_SET_2 \
+				(AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT | \
+				 AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT | \
+			AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT |\
+				 AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT)
+#define HW_PRTY_ASSERT_SET_2	(AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR | \
+			AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR |\
+				 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
+				 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
+
+#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
+			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+
+#define RSS_FLAGS(bp) \
+		(TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
+		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
+		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
+		 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
+		 (bp->multi_mode << \
+		  TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
+#define MULTI_MASK			0x7f
+
+
+#define DEF_USB_FUNC_OFF	offsetof(struct cstorm_def_status_block_u, func)
+#define DEF_CSB_FUNC_OFF	offsetof(struct cstorm_def_status_block_c, func)
+#define DEF_XSB_FUNC_OFF	offsetof(struct xstorm_def_status_block, func)
+#define DEF_TSB_FUNC_OFF	offsetof(struct tstorm_def_status_block, func)
+
+#define DEF_USB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_u, igu_index)
+#define DEF_CSB_IGU_INDEX_OFF \
+			offsetof(struct cstorm_def_status_block_c, igu_index)
+#define DEF_XSB_IGU_INDEX_OFF \
+			offsetof(struct xstorm_def_status_block, igu_index)
+#define DEF_TSB_IGU_INDEX_OFF \
+			offsetof(struct tstorm_def_status_block, igu_index)
+
+#define DEF_USB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_u, segment)
+#define DEF_CSB_SEGMENT_OFF \
+			offsetof(struct cstorm_def_status_block_c, segment)
+#define DEF_XSB_SEGMENT_OFF \
+			offsetof(struct xstorm_def_status_block, segment)
+#define DEF_TSB_SEGMENT_OFF \
+			offsetof(struct tstorm_def_status_block, segment)
+
+#define BNX2X_SP_DSB_INDEX \
+		(&bp->def_status_blk->sp_sb.\
+					index_values[HC_SP_INDEX_ETH_DEF_CONS])
+
+#define SET_FLAG(value, mask, flag) \
+	do {\
+		(value) &= ~(mask);\
+		(value) |= ((flag) << (mask##_SHIFT));\
+	} while (0)
+
+#define GET_FLAG(value, mask) \
+	(((value) & (mask)) >> (mask##_SHIFT))
+
+#define GET_FIELD(value, fname) \
+	(((value) & (fname##_MASK)) >> (fname##_SHIFT))
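+
+/* Usage sketch (hypothetical XYZ_FIELD/XYZ_FIELD_SHIFT definitions):
+ *	#define XYZ_FIELD	0x30
+ *	#define XYZ_FIELD_SHIFT	4
+ *	SET_FLAG(val, XYZ_FIELD, 2);	- writes 2 into bits 5:4
+ *	GET_FLAG(val, XYZ_FIELD);	- reads back 2
+ */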
+
+#define CAM_IS_INVALID(x) \
+	(GET_FLAG(x.flags, \
+	MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
+	(T_ETH_MAC_COMMAND_INVALIDATE))
+
+/* Number of u32 elements in MC hash array */
+#define MC_HASH_SIZE			8
+#define MC_HASH_OFFSET(bp, i)		(BAR_TSTRORM_INTMEM + \
+	TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(BP_FUNC(bp)) + i*4)
+
+
+#ifndef PXP2_REG_PXP2_INT_STS
+#define PXP2_REG_PXP2_INT_STS		PXP2_REG_PXP2_INT_STS_0
+#endif
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2		ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define BNX2X_VPD_LEN			128
+#define VENDOR_ID_LEN			4
+
+/* Congestion management fairness mode */
+#define CMNG_FNS_NONE		0
+#define CMNG_FNS_MINMAX		1
+
+#define HC_SEG_ACCESS_DEF		0   /* Driver decision 0-3 */
+#define HC_SEG_ACCESS_ATTN		4
+#define HC_SEG_ACCESS_NORM		0   /* Driver decision 0-1 */
+
+static const u32 dmae_reg_go_c[] = {
+	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
+	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
+	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
+	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev);
+void bnx2x_notify_link_changed(struct bnx2x *bp);
+#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
new file mode 100644
index 0000000..d724a18
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -0,0 +1,3571 @@
+/* bnx2x_cmn.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/firmware.h>
+#include <linux/prefetch.h>
+#include "bnx2x_cmn.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @index:	fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct napi_struct orig_napi = fp->napi;
+	/* bzero bnx2x_fastpath contents */
+	memset(fp, 0, sizeof(*fp));
+
+	/* Restore the NAPI object as it has been already initialized */
+	fp->napi = orig_napi;
+
+	fp->bp = bp;
+	fp->index = index;
+	if (IS_ETH_FP(fp))
+		fp->max_cos = bp->max_cos;
+	else
+		/* Special queues support only one CoS */
+		fp->max_cos = 1;
+
+	/*
+	 * Set the TPA flag for each queue. The TPA flag determines the
+	 * minimal queue size, so it must be set prior to queue memory
+	 * allocation.
+	 */
+	fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
+
+#ifdef BCM_CNIC
+	/* We don't want TPA on FCoE, FWD and OOO L2 rings */
+	bnx2x_fcoe(bp, disable_tpa) = 1;
+#endif
+}
+
+/**
+ * bnx2x_move_fp - move content of the fastpath structure.
+ *
+ * @bp:		driver handle
+ * @from:	source FP index
+ * @to:		destination FP index
+ *
+ * Makes sure the contents of bp->fp[to].napi are kept
+ * intact.
+ */
+static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
+{
+	struct bnx2x_fastpath *from_fp = &bp->fp[from];
+	struct bnx2x_fastpath *to_fp = &bp->fp[to];
+	struct napi_struct orig_napi = to_fp->napi;
+	/* Move bnx2x_fastpath contents */
+	memcpy(to_fp, from_fp, sizeof(*to_fp));
+	to_fp->index = to;
+
+	/* Restore the NAPI object as it has been already initialized */
+	to_fp->napi = orig_napi;
+}
+
+int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
+/* free skb in the packet ring at pos idx
+ * return idx of last bd freed
+ */
+static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
+			     u16 idx)
+{
+	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
+	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_bd *tx_data_bd;
+	struct sk_buff *skb = tx_buf->skb;
+	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
+	int nbd;
+
+	/* prefetch skb end pointer to speed up dev_kfree_skb() */
+	prefetch(&skb->end);
+
+	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
+	   txdata->txq_index, idx, tx_buf, skb);
+
+	/* unmap first bd */
+	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
+	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
+
+	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
+#ifdef BNX2X_STOP_ON_ERROR
+	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
+		BNX2X_ERR("BAD nbd!\n");
+		bnx2x_panic();
+	}
+#endif
+	new_cons = nbd + tx_buf->first_bd;
+
+	/* Get the next bd */
+	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+	/* Skip a parse bd... */
+	--nbd;
+	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+
+	/* ...and the TSO split header bd since they have no mapping */
+	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
+		--nbd;
+		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* now free frags */
+	while (nbd > 0) {
+
+		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
+		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
+		if (--nbd)
+			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+	}
+
+	/* release skb */
+	WARN_ON(!skb);
+	dev_kfree_skb_any(skb);
+	tx_buf->first_bd = 0;
+	tx_buf->skb = NULL;
+
+	return new_cons;
+}
+
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
+{
+	struct netdev_queue *txq;
+	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -1;
+#endif
+
+	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
+	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+	sw_cons = txdata->tx_pkt_cons;
+
+	while (sw_cons != hw_cons) {
+		u16 pkt_cons;
+
+		pkt_cons = TX_BD(sw_cons);
+
+		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
+				      " pkt_cons %u\n",
+		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
+
+		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+		sw_cons++;
+	}
+
+	txdata->tx_pkt_cons = sw_cons;
+	txdata->tx_bd_cons = bd_cons;
+
+	/* Need to make the tx_bd_cons update visible to start_xmit()
+	 * before checking for netif_tx_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that
+	 * start_xmit() will miss it and cause the queue to be stopped
+	 * forever.
+	 * On the other hand we need an rmb() here to ensure the proper
+	 * ordering of bit testing in the following
+	 * netif_tx_queue_stopped(txq) call.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq))) {
+		/* Taking tx_lock() is needed to prevent reenabling the queue
+		 * while it's empty. This could have happened if rx_action() gets
+		 * suspended in bnx2x_tx_int() after the condition before
+		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+		 *
+		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
+		 * sends some packets consuming the whole queue again->
+		 * stops the queue
+		 */
+
+		__netif_tx_lock(txq, smp_processor_id());
+
+		if ((netif_tx_queue_stopped(txq)) &&
+		    (bp->state == BNX2X_STATE_OPEN) &&
+		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
+			netif_tx_wake_queue(txq);
+
+		__netif_tx_unlock(txq);
+	}
+	return 0;
+}
+
+static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
+					     u16 idx)
+{
+	u16 last_max = fp->last_max_sge;
+
+	if (SUB_S16(idx, last_max) > 0)
+		fp->last_max_sge = idx;
+}
+
+static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+				  struct eth_fast_path_rx_cqe *fp_cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
+				     le16_to_cpu(fp_cqe->len_on_bd)) >>
+		      SGE_PAGE_SHIFT;
+	u16 last_max, last_elem, first_elem;
+	u16 delta = 0;
+	u16 i;
+
+	if (!sge_len)
+		return;
+
+	/* First mark all used pages */
+	for (i = 0; i < sge_len; i++)
+		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
+			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+
+	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
+	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+	/* Here we assume that the last SGE index is the biggest */
+	prefetch((void *)(fp->sge_mask));
+	bnx2x_update_last_max_sge(fp,
+		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+
+	last_max = RX_SGE(fp->last_max_sge);
+	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
+
+	/* If ring is not full */
+	if (last_elem + 1 != first_elem)
+		last_elem++;
+
+	/* Now update the prod */
+	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
+		if (likely(fp->sge_mask[i]))
+			break;
+
+		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+		delta += BIT_VEC64_ELEM_SZ;
+	}
+
+	if (delta > 0) {
+		fp->rx_sge_prod += delta;
+		/* clear page-end entries */
+		bnx2x_clear_sge_mask_next_elems(fp);
+	}
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
+	   fp->last_max_sge, fp->rx_sge_prod);
+}
+
+static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
+			    struct sk_buff *skb, u16 cons, u16 prod,
+			    struct eth_fast_path_rx_cqe *cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+	dma_addr_t mapping;
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+
+	/* print error if current state != stop */
+	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
+		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
+
+	/* Try to map an empty skb from the aggregation info  */
+	mapping = dma_map_single(&bp->pdev->dev,
+				 first_buf->skb->data,
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
+	/*
+	 *  ...if it fails - move the skb from the consumer to the producer
+	 *  and set the current aggregation state to ERROR so that the
+	 *  packet is dropped when TPA_STOP arrives.
+	 */
+
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		/* Move the BD from the consumer to the producer */
+		bnx2x_reuse_rx_skb(fp, cons, prod);
+		tpa_info->tpa_state = BNX2X_TPA_ERROR;
+		return;
+	}
+
+	/* move empty skb from pool to prod */
+	prod_rx_buf->skb = first_buf->skb;
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	/* point prod_bd to new skb */
+	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	/* move partial skb from cons to pool (don't unmap yet) */
+	*first_buf = *cons_rx_buf;
+
+	/* mark bin state as START */
+	tpa_info->parsing_flags =
+		le16_to_cpu(cqe->pars_flags.flags);
+	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+	tpa_info->tpa_state = BNX2X_TPA_START;
+	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+	tpa_info->placement_offset = cqe->placement_offset;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	fp->tpa_queue_used |= (1 << queue);
+#ifdef _ASM_GENERIC_INT_L64_H
+	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
+#else
+	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
+#endif
+	   fp->tpa_queue_used);
+#endif
+}
+
+/* Timestamp option length allowed for TPA aggregation:
+ *
+ *		nop nop kind length echo val
+ */
+#define TPA_TSTAMP_OPT_LEN	12
+/**
+ * bnx2x_set_lro_mss - calculate the approximate value of the MSS
+ *
+ * @bp:			driver handle
+ * @parsing_flags:	parsing flags from the START CQE
+ * @len_on_bd:		total length of the first packet for the
+ *			aggregation.
+ *
+ * Approximate value of the MSS for this aggregation calculated using
+ * the first packet of it.
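+ *
+ * For example, an IPv4 aggregation carrying a TCP timestamp option has
+ * hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (timestamp) = 66 bytes,
+ * so len_on_bd = 1514 yields an MSS of 1448.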
+ */
+static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
+				    u16 len_on_bd)
+{
+	/*
+	 * TPA aggregation won't have either IP options or TCP options
+	 * other than timestamp or IPv6 extension headers.
+	 */
+	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+	    PRS_FLAG_OVERETH_IPV6)
+		hdrs_len += sizeof(struct ipv6hdr);
+	else /* IPv4 */
+		hdrs_len += sizeof(struct iphdr);
+
+
+	/* Check if there was a TCP timestamp; if there was, it will
+	 * always be 12 bytes long: nop nop kind length echo val.
+	 *
+	 * Otherwise the FW would close the aggregation.
+	 */
+	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
+		hdrs_len += TPA_TSTAMP_OPT_LEN;
+
+	return len_on_bd - hdrs_len;
+}
+
+static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			       u16 queue, struct sk_buff *skb,
+			       struct eth_end_agg_rx_cqe *cqe,
+			       u16 cqe_idx)
+{
+	struct sw_rx_page *rx_pg, old_rx_pg;
+	u32 i, frag_len, frag_size, pages;
+	int err;
+	int j;
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	u16 len_on_bd = tpa_info->len_on_bd;
+
+	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
+	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+	/* This is needed in order to enable forwarding support */
+	if (frag_size)
+		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
+					tpa_info->parsing_flags, len_on_bd);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
+		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
+			  pages, cqe_idx);
+		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
+		bnx2x_panic();
+		return -EINVAL;
+	}
+#endif
+
+	/* Run through the SGL and compose the fragmented skb */
+	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
+		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
+
+		/* FW gives the indices of the SGE as if the ring is an array
+		   (meaning that "next" element will consume 2 indices) */
+		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+		rx_pg = &fp->rx_page_ring[sge_idx];
+		old_rx_pg = *rx_pg;
+
+		/* If we fail to allocate a substitute page, we simply stop
+		   where we are and drop the whole packet */
+		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
+		if (unlikely(err)) {
+			fp->eth_q_stats.rx_skb_alloc_failed++;
+			return err;
+		}
+
+		/* Unmap the page as we are going to pass it to the stack */
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+
+		/* Add one frag and update the appropriate fields in the skb */
+		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+
+		skb->data_len += frag_len;
+		skb->truesize += frag_len;
+		skb->len += frag_len;
+
+		frag_size -= frag_len;
+	}
+
+	return 0;
+}
+
+static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
+			   u16 cqe_idx)
+{
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+	u8 pad = tpa_info->placement_offset;
+	u16 len = tpa_info->len_on_bd;
+	struct sk_buff *skb = rx_buf->skb;
+	/* alloc new skb */
+	struct sk_buff *new_skb;
+	u8 old_tpa_state = tpa_info->tpa_state;
+
+	tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+	/* If there was an error during the handling of the TPA_START -
+	 * drop this aggregation.
+	 */
+	if (old_tpa_state == BNX2X_TPA_ERROR)
+		goto drop;
+
+	/* Try to allocate the new skb */
+	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+
+	/* Unmap skb in the pool anyway, as we are going to change
+	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
+	   fails. */
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 fp->rx_buf_size, DMA_FROM_DEVICE);
+
+	if (likely(new_skb)) {
+		prefetch(skb);
+		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+
+#ifdef BNX2X_STOP_ON_ERROR
+		if (pad + len > fp->rx_buf_size) {
+			BNX2X_ERR("skb_put is about to fail...  "
+				  "pad %d  len %d  rx_buf_size %d\n",
+				  pad, len, fp->rx_buf_size);
+			bnx2x_panic();
+			return;
+		}
+#endif
+
+		skb_reserve(skb, pad);
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, bp->dev);
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
+			napi_gro_receive(&fp->napi, skb);
+		} else {
+			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
+			   " - dropping packet!\n");
+			dev_kfree_skb_any(skb);
+		}
+
+		/* put new skb in bin */
+		rx_buf->skb = new_skb;
+
+		return;
+	}
+
+drop:
+	/* drop the packet and keep the buffer in the bin */
+	DP(NETIF_MSG_RX_STATUS,
+	   "Failed to allocate or map a new skb - dropping packet!\n");
+	fp->eth_q_stats.rx_skb_alloc_failed++;
+}
+
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
+					struct sk_buff *skb)
+{
+	/* Set Toeplitz hash from CQE */
+	if ((bp->dev->features & NETIF_F_RXHASH) &&
+	    (cqe->fast_path_cqe.status_flags &
+	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+		skb->rxhash =
+		le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
+}
+
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
+	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
+	int rx_pkt = 0;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return 0;
+#endif
+
+	/* The CQ "next element" is of the same size as a regular element,
+	   that's why it's ok here */
+	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
+	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		hw_comp_cons++;
+
+	bd_cons = fp->rx_bd_cons;
+	bd_prod = fp->rx_bd_prod;
+	bd_prod_fw = bd_prod;
+	sw_comp_cons = fp->rx_comp_cons;
+	sw_comp_prod = fp->rx_comp_prod;
+
+	/* Memory barrier necessary as speculative reads of the rx
+	 * buffer can be ahead of the index in the status block
+	 */
+	rmb();
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
+	   fp->index, hw_comp_cons, sw_comp_cons);
+
+	while (sw_comp_cons != hw_comp_cons) {
+		struct sw_rx_bd *rx_buf = NULL;
+		struct sk_buff *skb;
+		union eth_rx_cqe *cqe;
+		struct eth_fast_path_rx_cqe *cqe_fp;
+		u8 cqe_fp_flags;
+		enum eth_rx_cqe_type cqe_fp_type;
+		u16 len, pad;
+
+#ifdef BNX2X_STOP_ON_ERROR
+		if (unlikely(bp->panic))
+			return 0;
+#endif
+
+		comp_ring_cons = RCQ_BD(sw_comp_cons);
+		bd_prod = RX_BD(bd_prod);
+		bd_cons = RX_BD(bd_cons);
+
+		/* Prefetch the page containing the BD descriptor
+		   at the producer's index. It will be needed when a new
+		   skb is allocated */
+		prefetch((void *)(PAGE_ALIGN((unsigned long)
+					     (&fp->rx_desc_ring[bd_prod])) -
+				  PAGE_SIZE + 1));
+
+		cqe = &fp->rx_comp_ring[comp_ring_cons];
+		cqe_fp = &cqe->fast_path_cqe;
+		cqe_fp_flags = cqe_fp->type_error_flags;
+		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+
+		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
+		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
+		   cqe_fp_flags, cqe_fp->status_flags,
+		   le32_to_cpu(cqe_fp->rss_hash_result),
+		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+
+		/* is this a slowpath msg? */
+		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
+			bnx2x_sp_event(fp, cqe);
+			goto next_cqe;
+
+		/* this is an rx packet */
+		} else {
+			rx_buf = &fp->rx_buf_ring[bd_cons];
+			skb = rx_buf->skb;
+			prefetch(skb);
+
+			if (!CQE_TYPE_FAST(cqe_fp_type)) {
+#ifdef BNX2X_STOP_ON_ERROR
+				/* sanity check */
+				if (fp->disable_tpa &&
+				    (CQE_TYPE_START(cqe_fp_type) ||
+				     CQE_TYPE_STOP(cqe_fp_type)))
+					BNX2X_ERR("START/STOP packet while "
+						  "disable_tpa type %x\n",
+						  CQE_TYPE(cqe_fp_type));
+#endif
+
+				if (CQE_TYPE_START(cqe_fp_type)) {
+					u16 queue = cqe_fp->queue_index;
+					DP(NETIF_MSG_RX_STATUS,
+					   "calling tpa_start on queue %d\n",
+					   queue);
+
+					bnx2x_tpa_start(fp, queue, skb,
+							bd_cons, bd_prod,
+							cqe_fp);
+
+					/* Set Toeplitz hash for LRO skb */
+					bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+					goto next_rx;
+
+				} else {
+					u16 queue =
+						cqe->end_agg_cqe.queue_index;
+					DP(NETIF_MSG_RX_STATUS,
+					   "calling tpa_stop on queue %d\n",
+					   queue);
+
+					bnx2x_tpa_stop(bp, fp, queue,
+						       &cqe->end_agg_cqe,
+						       comp_ring_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+					if (bp->panic)
+						return 0;
+#endif
+
+					bnx2x_update_sge_prod(fp, cqe_fp);
+					goto next_cqe;
+				}
+			}
+			/* non TPA */
+			len = le16_to_cpu(cqe_fp->pkt_len);
+			pad = cqe_fp->placement_offset;
+			dma_sync_single_for_cpu(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+						       pad + RX_COPY_THRESH,
+						       DMA_FROM_DEVICE);
+			prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+
+			/* is this an error packet? */
+			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+				DP(NETIF_MSG_RX_ERR,
+				   "ERROR  flags %x  rx packet %u\n",
+				   cqe_fp_flags, sw_comp_cons);
+				fp->eth_q_stats.rx_err_discard_pkt++;
+				goto reuse_rx;
+			}
+
+			/* Since we don't have a jumbo ring,
+			 * copy small packets if mtu > 1500
+			 */
+			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+			    (len <= RX_COPY_THRESH)) {
+				struct sk_buff *new_skb;
+
+				new_skb = netdev_alloc_skb(bp->dev, len + pad);
+				if (new_skb == NULL) {
+					DP(NETIF_MSG_RX_ERR,
+					   "ERROR  packet dropped "
+					   "because of alloc failure\n");
+					fp->eth_q_stats.rx_skb_alloc_failed++;
+					goto reuse_rx;
+				}
+
+				/* aligned copy */
+				skb_copy_from_linear_data_offset(skb, pad,
+						    new_skb->data + pad, len);
+				skb_reserve(new_skb, pad);
+				skb_put(new_skb, len);
+
+				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+
+				skb = new_skb;
+
+			} else
+			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+				dma_unmap_single(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+						 fp->rx_buf_size,
+						 DMA_FROM_DEVICE);
+				skb_reserve(skb, pad);
+				skb_put(skb, len);
+
+			} else {
+				DP(NETIF_MSG_RX_ERR,
+				   "ERROR  packet dropped because "
+				   "of alloc failure\n");
+				fp->eth_q_stats.rx_skb_alloc_failed++;
+reuse_rx:
+				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+				goto next_rx;
+			}
+
+			skb->protocol = eth_type_trans(skb, bp->dev);
+
+			/* Set Toeplitz hash for a non-LRO skb */
+			bnx2x_set_skb_rxhash(bp, cqe, skb);
+
+			skb_checksum_none_assert(skb);
+
+			if (bp->dev->features & NETIF_F_RXCSUM) {
+
+				if (likely(BNX2X_RX_CSUM_OK(cqe)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					fp->eth_q_stats.hw_csum_err++;
+			}
+		}
+
+		skb_record_rx_queue(skb, fp->index);
+
+		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+		    PARSING_FLAGS_VLAN)
+			__vlan_hwaccel_put_tag(skb,
+					       le16_to_cpu(cqe_fp->vlan_tag));
+		napi_gro_receive(&fp->napi, skb);
+
+next_rx:
+		rx_buf->skb = NULL;
+
+		bd_cons = NEXT_RX_IDX(bd_cons);
+		bd_prod = NEXT_RX_IDX(bd_prod);
+		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
+		rx_pkt++;
+next_cqe:
+		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
+		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
+
+		if (rx_pkt == budget)
+			break;
+	} /* while */
+
+	fp->rx_bd_cons = bd_cons;
+	fp->rx_bd_prod = bd_prod_fw;
+	fp->rx_comp_cons = sw_comp_cons;
+	fp->rx_comp_prod = sw_comp_prod;
+
+	/* Update producers */
+	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
+			     fp->rx_sge_prod);
+
+	fp->rx_pkt += rx_pkt;
+	fp->rx_calls++;
+
+	return rx_pkt;
+}
+
+static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
+{
+	struct bnx2x_fastpath *fp = fp_cookie;
+	struct bnx2x *bp = fp->bp;
+	u8 cos;
+
+	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
+			 "[fp %d fw_sd %d igusb %d]\n",
+	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
+	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+	/* Handle Rx and Tx according to MSI-X vector */
+	prefetch(fp->rx_cons_sb);
+
+	for_each_cos_in_tx_queue(fp, cos)
+		prefetch(fp->txdata[cos].tx_cons_sb);
+
+	prefetch(&fp->sb_running_index[SM_RX_ID]);
+	napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+
+	return IRQ_HANDLED;
+}
+
+/* HW Lock for shared dual port PHYs */
+void bnx2x_acquire_phy_lock(struct bnx2x *bp)
+{
+	mutex_lock(&bp->port.phy_mutex);
+
+	if (bp->port.need_hw_lock)
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+}
+
+void bnx2x_release_phy_lock(struct bnx2x *bp)
+{
+	if (bp->port.need_hw_lock)
+		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
+
+	mutex_unlock(&bp->port.phy_mutex);
+}
+
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+	u16 line_speed = bp->link_vars.line_speed;
+	if (IS_MF(bp)) {
+		u16 maxCfg = bnx2x_extract_max_cfg(bp,
+						   bp->mf_config[BP_VN(bp)]);
+
+		/* Calculate the current MAX line speed limit for the MF
+		 * devices
+		 */
+		if (IS_MF_SI(bp))
+			line_speed = (line_speed * maxCfg) / 100;
+		else { /* SD mode */
+			u16 vn_max_rate = maxCfg * 100;
+
+			if (vn_max_rate < line_speed)
+				line_speed = vn_max_rate;
+		}
+	}
+
+	return line_speed;
+}
+
+/**
+ * bnx2x_fill_report_data - fill link report data to report
+ *
+ * @bp:		driver handle
+ * @data:	link state to update
+ *
+ * It uses non-atomic bit operations because it is called under the mutex.
+ */
+static inline void bnx2x_fill_report_data(struct bnx2x *bp,
+					  struct bnx2x_link_report_data *data)
+{
+	u16 line_speed = bnx2x_get_mf_speed(bp);
+
+	memset(data, 0, sizeof(*data));
+
+	/* Fill the report data: effective line speed */
+	data->line_speed = line_speed;
+
+	/* Link is down */
+	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+			  &data->link_report_flags);
+
+	/* Full DUPLEX */
+	if (bp->link_vars.duplex == DUPLEX_FULL)
+		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
+
+	/* Rx Flow Control is ON */
+	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
+
+	/* Tx Flow Control is ON */
+	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+}
+
+/**
+ * bnx2x_link_report - report link status to OS.
+ *
+ * @bp:		driver handle
+ *
+ * Calls the __bnx2x_link_report() under the same locking scheme
+ * as the link/PHY state managing code to ensure consistent link
+ * reporting.
+ */
+
+void bnx2x_link_report(struct bnx2x *bp)
+{
+	bnx2x_acquire_phy_lock(bp);
+	__bnx2x_link_report(bp);
+	bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * __bnx2x_link_report - report link status to OS.
+ *
+ * @bp:		driver handle
+ *
+ * Non-atomic implementation.
+ * Should be called under the phy_lock.
+ */
+void __bnx2x_link_report(struct bnx2x *bp)
+{
+	struct bnx2x_link_report_data cur_data;
+
+	/* reread mf_cfg */
+	if (!CHIP_IS_E1(bp))
+		bnx2x_read_mf_cfg(bp);
+
+	/* Read the current link report info */
+	bnx2x_fill_report_data(bp, &cur_data);
+
+	/* Don't report link down or exactly the same link status twice */
+	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
+	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		      &bp->last_reported_link.link_report_flags) &&
+	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		      &cur_data.link_report_flags)))
+		return;
+
+	bp->link_cnt++;
+
+	/* We are going to report new link parameters now -
+	 * remember the current data for the next time.
+	 */
+	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
+
+	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		     &cur_data.link_report_flags)) {
+		netif_carrier_off(bp->dev);
+		netdev_err(bp->dev, "NIC Link is Down\n");
+		return;
+	} else {
+		netif_carrier_on(bp->dev);
+		netdev_info(bp->dev, "NIC Link is Up, ");
+		pr_cont("%d Mbps ", cur_data.line_speed);
+
+		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
+				       &cur_data.link_report_flags))
+			pr_cont("full duplex");
+		else
+			pr_cont("half duplex");
+
+		/* Handle the FC at the end so that only these flags would be
+		 * possibly set. This way we may easily check if there is no FC
+		 * enabled.
+		 */
+		if (cur_data.link_report_flags) {
+			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+				     &cur_data.link_report_flags)) {
+				pr_cont(", receive ");
+				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+				     &cur_data.link_report_flags))
+					pr_cont("& transmit ");
+			} else {
+				pr_cont(", transmit ");
+			}
+			pr_cont("flow control ON");
+		}
+		pr_cont("\n");
+	}
+}
+
+void bnx2x_init_rx_rings(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
+	u16 ring_prod;
+	int i, j;
+
+	/* Allocate TPA resources */
+	for_each_rx_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		DP(NETIF_MSG_IFUP,
+		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
+
+		if (!fp->disable_tpa) {
+			/* Fill the per-aggregation pool */
+			for (i = 0; i < max_agg_queues; i++) {
+				struct bnx2x_agg_info *tpa_info =
+					&fp->tpa_info[i];
+				struct sw_rx_bd *first_buf =
+					&tpa_info->first_buf;
+
+				first_buf->skb = netdev_alloc_skb(bp->dev,
+						       fp->rx_buf_size);
+				if (!first_buf->skb) {
+					BNX2X_ERR("Failed to allocate TPA "
+						  "skb pool for queue[%d] - "
+						  "disabling TPA on this "
+						  "queue!\n", j);
+					bnx2x_free_tpa_pool(bp, fp, i);
+					fp->disable_tpa = 1;
+					break;
+				}
+				dma_unmap_addr_set(first_buf, mapping, 0);
+				tpa_info->tpa_state = BNX2X_TPA_STOP;
+			}
+
+			/* "next page" elements initialization */
+			bnx2x_set_next_page_sgl(fp);
+
+			/* set SGEs bit mask */
+			bnx2x_init_sge_ring_bit_mask(fp);
+
+			/* Allocate SGEs and initialize the ring elements */
+			for (i = 0, ring_prod = 0;
+			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
+
+				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
+					BNX2X_ERR("was only able to allocate "
+						  "%d rx sges\n", i);
+					BNX2X_ERR("disabling TPA for "
+						  "queue[%d]\n", j);
+					/* Cleanup already allocated elements */
+					bnx2x_free_rx_sge_range(bp, fp,
+								ring_prod);
+					bnx2x_free_tpa_pool(bp, fp,
+							    max_agg_queues);
+					fp->disable_tpa = 1;
+					ring_prod = 0;
+					break;
+				}
+				ring_prod = NEXT_SGE_IDX(ring_prod);
+			}
+
+			fp->rx_sge_prod = ring_prod;
+		}
+	}
+
+	for_each_rx_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		fp->rx_bd_cons = 0;
+
+		/* Activate BD ring */
+		/* Warning!
+		 * this will generate an interrupt (to the TSTORM);
+		 * it must only be done after the chip is initialized
+		 */
+		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
+				     fp->rx_sge_prod);
+
+		if (j != 0)
+			continue;
+
+		if (CHIP_IS_E1(bp)) {
+			REG_WR(bp, BAR_USTRORM_INTMEM +
+			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
+			       U64_LO(fp->rx_comp_mapping));
+			REG_WR(bp, BAR_USTRORM_INTMEM +
+			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
+			       U64_HI(fp->rx_comp_mapping));
+		}
+	}
+}
+
+static void bnx2x_free_tx_skbs(struct bnx2x *bp)
+{
+	int i;
+	u8 cos;
+
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+			u16 bd_cons = txdata->tx_bd_cons;
+			u16 sw_prod = txdata->tx_pkt_prod;
+			u16 sw_cons = txdata->tx_pkt_cons;
+
+			while (sw_cons != sw_prod) {
+				bd_cons = bnx2x_free_tx_pkt(bp, txdata,
+							    TX_BD(sw_cons));
+				sw_cons++;
+			}
+		}
+	}
+}
+
+static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+	int i;
+
+	/* ring wasn't allocated */
+	if (fp->rx_buf_ring == NULL)
+		return;
+
+	for (i = 0; i < NUM_RX_BD; i++) {
+		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
+		struct sk_buff *skb = rx_buf->skb;
+
+		if (skb == NULL)
+			continue;
+		dma_unmap_single(&bp->pdev->dev,
+				 dma_unmap_addr(rx_buf, mapping),
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
+
+		rx_buf->skb = NULL;
+		dev_kfree_skb(skb);
+	}
+}
+
+static void bnx2x_free_rx_skbs(struct bnx2x *bp)
+{
+	int j;
+
+	for_each_rx_queue(bp, j) {
+		struct bnx2x_fastpath *fp = &bp->fp[j];
+
+		bnx2x_free_rx_bds(fp);
+
+		if (!fp->disable_tpa)
+			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
+					    ETH_MAX_AGGREGATION_QUEUES_E1 :
+					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+	}
+}
+
+void bnx2x_free_skbs(struct bnx2x *bp)
+{
+	bnx2x_free_tx_skbs(bp);
+	bnx2x_free_rx_skbs(bp);
+}
+
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
+{
+	/* load old values */
+	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
+
+	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
+		/* leave all but MAX value */
+		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
+
+		/* set new MAX value */
+		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
+				& FUNC_MF_CFG_MAX_BW_MASK;
+
+		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
+	}
+}
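+
+/* Bit-packing sketch (illustrative; relies on the HSI definitions of the
+ * MAX_BW mask and shift): the new MAX value replaces only the
+ * FUNC_MF_CFG_MAX_BW_MASK field, e.g. for value = 50:
+ *
+ *	mf_cfg = (mf_cfg & ~FUNC_MF_CFG_MAX_BW_MASK) |
+ *		 ((50 << FUNC_MF_CFG_MAX_BW_SHIFT) & FUNC_MF_CFG_MAX_BW_MASK);
+ */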
+
+/**
+ * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
+ *
+ * @bp:		driver handle
+ * @nvecs:	number of vectors to be released
+ */
+static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
+{
+	int i, offset = 0;
+
+	if (nvecs == offset)
+		return;
+	free_irq(bp->msix_table[offset].vector, bp->dev);
+	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
+	   bp->msix_table[offset].vector);
+	offset++;
+#ifdef BCM_CNIC
+	if (nvecs == offset)
+		return;
+	offset++;
+#endif
+
+	for_each_eth_queue(bp, i) {
+		if (nvecs == offset)
+			return;
+		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
+		   "irq\n", i, bp->msix_table[offset].vector);
+
+		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
+	}
+}
+
+void bnx2x_free_irq(struct bnx2x *bp)
+{
+	if (bp->flags & USING_MSIX_FLAG)
+		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
+				     CNIC_PRESENT + 1);
+	else
+		free_irq(bp->pdev->irq, bp->dev);
+}
+
+int bnx2x_enable_msix(struct bnx2x *bp)
+{
+	int msix_vec = 0, i, rc, req_cnt;
+
+	bp->msix_table[msix_vec].entry = msix_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+	   bp->msix_table[0].entry);
+	msix_vec++;
+
+#ifdef BCM_CNIC
+	bp->msix_table[msix_vec].entry = msix_vec;
+	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
+	msix_vec++;
+#endif
+	/* We need separate vectors for ETH queues only (not FCoE) */
+	for_each_eth_queue(bp, i) {
+		bp->msix_table[msix_vec].entry = msix_vec;
+		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
+		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
+		msix_vec++;
+	}
+
+	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;
+
+	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
+
+	/*
+	 * reconfigure number of tx/rx queues according to available
+	 * MSI-X vectors
+	 */
+	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+		/* how many fewer vectors will we have? */
+		int diff = req_cnt - rc;
+
+		DP(NETIF_MSG_IFUP,
+		   "Trying to use less MSI-X vectors: %d\n", rc);
+
+		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+		if (rc) {
+			DP(NETIF_MSG_IFUP,
+			   "MSI-X is not attainable  rc %d\n", rc);
+			return rc;
+		}
+		/*
+		 * decrease number of queues by number of unallocated entries
+		 */
+		bp->num_queues -= diff;
+
+		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+				  bp->num_queues);
+	} else if (rc) {
+		/* fall back to INTx if not enough memory */
+		if (rc == -ENOMEM)
+			bp->flags |= DISABLE_MSI_FLAG;
+		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
+		return rc;
+	}
+
+	bp->flags |= USING_MSIX_FLAG;
+
+	return 0;
+}
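+
+/* Resulting MSI-X table layout (sketch): entry 0 carries the slowpath
+ * vector, entry 1 the CNIC vector when BCM_CNIC is defined, and the
+ * following entries one ETH fastpath queue each, giving
+ * req_cnt = 1 + CNIC_PRESENT + BNX2X_NUM_ETH_QUEUES(bp).
+ */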
+
+static int bnx2x_req_msix_irqs(struct bnx2x *bp)
+{
+	int i, rc, offset = 0;
+
+	rc = request_irq(bp->msix_table[offset++].vector,
+			 bnx2x_msix_sp_int, 0,
+			 bp->dev->name, bp->dev);
+	if (rc) {
+		BNX2X_ERR("request sp irq failed\n");
+		return -EBUSY;
+	}
+
+#ifdef BCM_CNIC
+	offset++;
+#endif
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
+			 bp->dev->name, i);
+
+		rc = request_irq(bp->msix_table[offset].vector,
+				 bnx2x_msix_fp_int, 0, fp->name, fp);
+		if (rc) {
+			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
+			      bp->msix_table[offset].vector, rc);
+			bnx2x_free_msix_irqs(bp, offset);
+			return -EBUSY;
+		}
+
+		offset++;
+	}
+
+	i = BNX2X_NUM_ETH_QUEUES(bp);
+	offset = 1 + CNIC_PRESENT;
+	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
+	       " ... fp[%d] %d\n",
+	       bp->msix_table[0].vector,
+	       0, bp->msix_table[offset].vector,
+	       i - 1, bp->msix_table[offset + i - 1].vector);
+
+	return 0;
+}
+
+int bnx2x_enable_msi(struct bnx2x *bp)
+{
+	int rc;
+
+	rc = pci_enable_msi(bp->pdev);
+	if (rc) {
+		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+		return -1;
+	}
+	bp->flags |= USING_MSI_FLAG;
+
+	return 0;
+}
+
+static int bnx2x_req_irq(struct bnx2x *bp)
+{
+	unsigned long flags;
+	int rc;
+
+	if (bp->flags & USING_MSI_FLAG)
+		flags = 0;
+	else
+		flags = IRQF_SHARED;
+
+	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
+			 bp->dev->name, bp->dev);
+	return rc;
+}
+
+static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+	int rc = 0;
+	if (bp->flags & USING_MSIX_FLAG) {
+		rc = bnx2x_req_msix_irqs(bp);
+		if (rc)
+			return rc;
+	} else {
+		bnx2x_ack_int(bp);
+		rc = bnx2x_req_irq(bp);
+		if (rc) {
+			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
+			return rc;
+		}
+		if (bp->flags & USING_MSI_FLAG) {
+			bp->dev->irq = bp->pdev->irq;
+			netdev_info(bp->dev, "using MSI  IRQ %d\n",
+			       bp->pdev->irq);
+		}
+	}
+
+	return 0;
+}
+
+static inline void bnx2x_napi_enable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue(bp, i)
+		napi_enable(&bnx2x_fp(bp, i, napi));
+}
+
+static inline void bnx2x_napi_disable(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue(bp, i)
+		napi_disable(&bnx2x_fp(bp, i, napi));
+}
+
+void bnx2x_netif_start(struct bnx2x *bp)
+{
+	if (netif_running(bp->dev)) {
+		bnx2x_napi_enable(bp);
+		bnx2x_int_enable(bp);
+		if (bp->state == BNX2X_STATE_OPEN)
+			netif_tx_wake_all_queues(bp->dev);
+	}
+}
+
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
+{
+	bnx2x_int_disable_sync(bp, disable_hw);
+	bnx2x_napi_disable(bp);
+}
+
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+#ifdef BCM_CNIC
+	if (NO_FCOE(bp))
+		return skb_tx_hash(dev, skb);
+	else {
+		struct ethhdr *hdr = (struct ethhdr *)skb->data;
+		u16 ether_type = ntohs(hdr->h_proto);
+
+		/* Skip VLAN tag if present */
+		if (ether_type == ETH_P_8021Q) {
+			struct vlan_ethhdr *vhdr =
+				(struct vlan_ethhdr *)skb->data;
+
+			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
+		}
+
+		/* If ethertype is FCoE or FIP - use FCoE ring */
+		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
+			return bnx2x_fcoe_tx(bp, txq_index);
+	}
+#endif
+	/* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2
+	 * ring
+	 */
+	return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
+}
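+
+/* Steering sketch: FCoE (ETH_P_FCOE) and FIP (ETH_P_FIP) ethertypes -
+ * also when nested under an 802.1Q tag - are pinned to the dedicated
+ * FCoE Tx ring, while all other traffic is hashed across the ETH queues
+ * only.
+ */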
+
+void bnx2x_set_num_queues(struct bnx2x *bp)
+{
+	switch (bp->multi_mode) {
+	case ETH_RSS_MODE_DISABLED:
+		bp->num_queues = 1;
+		break;
+	case ETH_RSS_MODE_REGULAR:
+		bp->num_queues = bnx2x_calc_num_queues(bp);
+		break;
+
+	default:
+		bp->num_queues = 1;
+		break;
+	}
+
+	/* Add special queues */
+	bp->num_queues += NON_ETH_CONTEXT_USE;
+}
+
+static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
+{
+	int rc, tx, rx;
+
+	tx = MAX_TXQS_PER_COS * bp->max_cos;
+	rx = BNX2X_NUM_ETH_QUEUES(bp);
+
+/* account for fcoe queue */
+#ifdef BCM_CNIC
+	if (!NO_FCOE(bp)) {
+		rx += FCOE_PRESENT;
+		tx += FCOE_PRESENT;
+	}
+#endif
+
+	rc = netif_set_real_num_tx_queues(bp->dev, tx);
+	if (rc) {
+		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
+		return rc;
+	}
+	rc = netif_set_real_num_rx_queues(bp->dev, rx);
+	if (rc) {
+		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
+		return rc;
+	}
+
+	DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+			  tx, rx);
+
+	return rc;
+}
+
+static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
+		if (IS_FCOE_IDX(i))
+			/*
+			 * Although there are no IP frames expected to
+			 * arrive on this ring, we still want to add an
+			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
+			 * overrun attack.
+			 */
+			fp->rx_buf_size =
+				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
+				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+		else
+			fp->rx_buf_size =
+				bp->dev->mtu + ETH_OVREHEAD +
+				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+	}
+}
+
+static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+{
+	int i;
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/*
+	 * Prepare the initial contents of the indirection table if RSS is
+	 * enabled
+	 */
+	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+		for (i = 0; i < sizeof(ind_table); i++)
+			ind_table[i] =
+				bp->fp->cl_id + (i % num_eth_queues);
+	}
+
+	/*
+	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+	 * per-port, so if explicit configuration is needed, do it only
+	 * for a PMF.
+	 *
+	 * For 57712 and newer on the other hand it's a per-function
+	 * configuration.
+	 */
+	return bnx2x_config_rss_pf(bp, ind_table,
+				   bp->port.pmf || !CHIP_IS_E1x(bp));
+}
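+
+/* Indirection table sketch (illustrative, values assumed): with
+ * bp->fp->cl_id = 17 and 4 ETH queues, the table above cycles
+ * 17, 18, 19, 20, 17, ... across all T_ETH_INDIRECTION_TABLE_SIZE
+ * entries, spreading the RSS hash buckets evenly over the queues.
+ */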
+
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+{
+	struct bnx2x_config_rss_params params = {0};
+	int i;
+
+	/* Although RSS is meaningless when there is a single HW queue we
+	 * still need it enabled in order to have HW Rx hash generated.
+	 *
+	 * if (!is_eth_multi(bp))
+	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
+	 */
+
+	params.rss_obj = &bp->rss_conf_obj;
+
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	/* RSS mode */
+	switch (bp->multi_mode) {
+	case ETH_RSS_MODE_DISABLED:
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_REGULAR:
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_VLAN_PRI:
+		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_E1HOV_PRI:
+		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_IP_DSCP:
+		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
+		break;
+	default:
+		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
+		return -EINVAL;
+	}
+
+	/* If RSS is enabled */
+	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+		/* RSS configuration */
+		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+
+		/* Hash bits */
+		params.rss_result_mask = MULTI_MASK;
+
+		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+
+		if (config_hash) {
+			/* RSS keys */
+			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+				params.rss_key[i] = random32();
+
+			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+		}
+	}
+
+	return bnx2x_config_rss(bp, &params);
+}
+
+static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+	struct bnx2x_func_state_params func_params = {0};
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+	func_params.params.hw_init.load_phase = load_code;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+	int rc;
+	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+
+	/***************** Cleanup MACs' object first *************************/
+
+	/* Wait for completion of requested */
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	/* Perform a dry cleanup */
+	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+	/* Clean ETH primary MAC */
+	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+	/* Cleanup UC list */
+	vlan_mac_flags = 0;
+	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+	/***************** Now clean mcast object *****************************/
+	rparam.mcast_obj = &bp->mcast_obj;
+	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+	/* Add a DEL command... */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0)
+		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
+			  "object: %d\n", rc);
+
+	/* ...and wait until all pending commands are cleared */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	while (rc != 0) {
+		if (rc < 0) {
+			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+				  rc);
+			return;
+		}
+
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	}
+}
+
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		goto label; \
+	} while (0)
+#else
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#endif
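+
+/* Usage sketch: error paths in bnx2x_nic_load() below call e.g.
+ * LOAD_ERROR_EXIT(bp, load_error2). Without BNX2X_STOP_ON_ERROR this
+ * jumps to the label and unwinds the partially completed load; with it,
+ * the driver is marked panicked and -EBUSY is returned for debugging.
+ */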
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
+{
+	int port = BP_PORT(bp);
+	u32 load_code;
+	int i, rc;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EPERM;
+#endif
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
+
+	/* Set the initial link reported state to link down */
+	bnx2x_acquire_phy_lock(bp);
+	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
+	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+		&bp->last_reported_link.link_report_flags);
+	bnx2x_release_phy_lock(bp);
+
+	/* must be called before memory allocation and HW init */
+	bnx2x_ilt_set_info(bp);
+
+	/*
+	 * Zero fastpath structures while preserving invariants: napi (which
+	 * is allocated only once), fp index, max_cos and the bp pointer.
+	 * Also set fp->disable_tpa.
+	 */
+	for_each_queue(bp, i)
+		bnx2x_bz_fp(bp, i);
+
+	/* Set the receive queues buffer size */
+	bnx2x_set_rx_buf_size(bp);
+
+	if (bnx2x_alloc_mem(bp))
+		return -ENOMEM;
+
+	/* As long as bnx2x_alloc_mem() may possibly update
+	 * bp->num_queues, bnx2x_set_real_num_queues() should always
+	 * come after it.
+	 */
+	rc = bnx2x_set_real_num_queues(bp);
+	if (rc) {
+		BNX2X_ERR("Unable to set real_num_queues\n");
+		LOAD_ERROR_EXIT(bp, load_error0);
+	}
+
+	/* Configure multi-COS mappings in the kernel.
+	 * This configuration may be overridden by a multi-class queue
+	 * discipline or by a DCBX negotiation result.
+	 */
+	bnx2x_setup_tc(bp->dev, bp->max_cos);
+
+	bnx2x_napi_enable(bp);
+
+	/* Send LOAD_REQUEST command to MCP
+	 * Returns the type of LOAD command:
+	 * if it is the first port to be initialized
+	 * common blocks should be initialized, otherwise - not
+	 */
+	if (!BP_NOMCP(bp)) {
+		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+		if (!load_code) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EBUSY;
+			LOAD_ERROR_EXIT(bp, load_error1);
+		}
+		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+			rc = -EBUSY; /* other port in diagnostic mode */
+			LOAD_ERROR_EXIT(bp, load_error1);
+		}
+
+	} else {
+		int path = BP_PATH(bp);
+
+		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
+		   path, load_count[path][0], load_count[path][1],
+		   load_count[path][2]);
+		load_count[path][0]++;
+		load_count[path][1 + port]++;
+		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
+		   path, load_count[path][0], load_count[path][1],
+		   load_count[path][2]);
+		if (load_count[path][0] == 1)
+			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
+		else if (load_count[path][1 + port] == 1)
+			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
+		else
+			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
+	}
+
+	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
+	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
+		bp->port.pmf = 1;
+		/*
+		 * We need the barrier to ensure the ordering between the
+		 * writing to bp->port.pmf here and reading it from the
+		 * bnx2x_periodic_task().
+		 */
+		smp_mb();
+		queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+	} else
+		bp->port.pmf = 0;
+
+	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+	/* Init Function state controlling object */
+	bnx2x__init_func_obj(bp);
+
+	/* Initialize HW */
+	rc = bnx2x_init_hw(bp, load_code);
+	if (rc) {
+		BNX2X_ERR("HW init failed, aborting\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+		LOAD_ERROR_EXIT(bp, load_error2);
+	}
+
+	/* Connect to IRQs */
+	rc = bnx2x_setup_irqs(bp);
+	if (rc) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+		LOAD_ERROR_EXIT(bp, load_error2);
+	}
+
+	/* Setup NIC internals and enable interrupts */
+	bnx2x_nic_init(bp, load_code);
+
+	/* Init per-function objects */
+	bnx2x_init_bp_objs(bp);
+
+	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
+	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
+	    (bp->common.shmem2_base)) {
+		if (SHMEM2_HAS(bp, dcc_support))
+			SHMEM2_WR(bp, dcc_support,
+				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+	}
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+	rc = bnx2x_func_start(bp);
+	if (rc) {
+		BNX2X_ERR("Function start failed!\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
+
+	/* Send LOAD_DONE command to MCP */
+	if (!BP_NOMCP(bp)) {
+		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+		if (!load_code) {
+			BNX2X_ERR("MCP response failure, aborting\n");
+			rc = -EBUSY;
+			LOAD_ERROR_EXIT(bp, load_error3);
+		}
+	}
+
+	rc = bnx2x_setup_leading(bp);
+	if (rc) {
+		BNX2X_ERR("Setup leading failed!\n");
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
+
+#ifdef BCM_CNIC
+	/* Enable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
+#endif
+
+	for_each_nondefault_queue(bp, i) {
+		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
+		if (rc)
+			LOAD_ERROR_EXIT(bp, load_error4);
+	}
+
+	rc = bnx2x_init_rss_pf(bp);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error4);
+
+	/* Now that clients are configured we are ready to work */
+	bp->state = BNX2X_STATE_OPEN;
+
+	/* Configure a ucast MAC */
+	rc = bnx2x_set_eth_mac(bp, true);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error4);
+
+	if (bp->pending_max) {
+		bnx2x_update_max_mf_config(bp, bp->pending_max);
+		bp->pending_max = 0;
+	}
+
+	if (bp->port.pmf)
+		bnx2x_initial_phy_init(bp, load_mode);
+
+	/* Start fast path */
+
+	/* Initialize Rx filter. */
+	netif_addr_lock_bh(bp->dev);
+	bnx2x_set_rx_mode(bp->dev);
+	netif_addr_unlock_bh(bp->dev);
+
+	/* Start the Tx */
+	switch (load_mode) {
+	case LOAD_NORMAL:
+		/* Tx queue should be only reenabled */
+		netif_tx_wake_all_queues(bp->dev);
+		break;
+
+	case LOAD_OPEN:
+		netif_tx_start_all_queues(bp->dev);
+		smp_mb__after_clear_bit();
+		break;
+
+	case LOAD_DIAG:
+		bp->state = BNX2X_STATE_DIAG;
+		break;
+
+	default:
+		break;
+	}
+
+	if (!bp->port.pmf)
+		bnx2x__link_status_update(bp);
+
+	/* start the timer */
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+
+#ifdef BCM_CNIC
+	bnx2x_setup_cnic_irq_info(bp);
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
+#endif
+	bnx2x_inc_load_cnt(bp);
+
+	/* Wait for all pending SP commands to complete */
+	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+		return -EBUSY;
+	}
+
+	bnx2x_dcbx_init(bp);
+	return 0;
+
+#ifndef BNX2X_STOP_ON_ERROR
+load_error4:
+#ifdef BCM_CNIC
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+#endif
+load_error3:
+	bnx2x_int_disable_sync(bp, 1);
+
+	/* Clean queueable objects */
+	bnx2x_squeeze_objects(bp);
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_rx_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+load_error2:
+	if (!BP_NOMCP(bp)) {
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+	}
+
+	bp->port.pmf = 0;
+load_error1:
+	bnx2x_napi_disable(bp);
+load_error0:
+	bnx2x_free_mem(bp);
+
+	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
+}
+
+/* must be called with rtnl_lock */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
+{
+	int i;
+	bool global = false;
+
+	if ((bp->state == BNX2X_STATE_CLOSED) ||
+	    (bp->state == BNX2X_STATE_ERROR)) {
+		/* We can get here if the driver has been unloaded
+		 * during parity error recovery and is either waiting for a
+		 * leader to complete or for other functions to unload and
+		 * then ifdown has been issued. In this case we want to
+		 * unload and let other functions complete the recovery
+		 * process.
+		 */
+		bp->recovery_state = BNX2X_RECOVERY_DONE;
+		bp->is_leader = 0;
+		bnx2x_release_leader_lock(bp);
+		smp_mb();
+
+		DP(NETIF_MSG_HW, "Releasing a leadership...\n");
+
+		return -EINVAL;
+	}
+
+	/*
+	 * It's important to set the bp->state to the value different from
+	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
+	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
+	 */
+	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+	smp_mb();
+
+	/* Stop Tx */
+	bnx2x_tx_disable(bp);
+
+#ifdef BCM_CNIC
+	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+	del_timer_sync(&bp->timer);
+
+	/* Set ALWAYS_ALIVE bit in shmem */
+	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+	bnx2x_drv_pulse(bp);
+
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	/* Cleanup the chip if needed */
+	if (unload_mode != UNLOAD_RECOVERY)
+		bnx2x_chip_cleanup(bp, unload_mode);
+	else {
+		/* Send the UNLOAD_REQUEST to the MCP */
+		bnx2x_send_unload_req(bp, unload_mode);
+
+		/*
+		 * Prevent transactions to the host from the functions on the
+		 * engine that doesn't reset global blocks in case of a global
+		 * attention once global blocks are reset and gates are opened
+		 * (the engine whose leader will perform the recovery
+		 * last).
+		 */
+		if (!CHIP_IS_E1x(bp))
+			bnx2x_pf_disable(bp);
+
+		/* Disable HW interrupts, NAPI */
+		bnx2x_netif_stop(bp, 1);
+
+		/* Release IRQs */
+		bnx2x_free_irq(bp);
+
+		/* Report UNLOAD_DONE to MCP */
+		bnx2x_send_unload_done(bp);
+	}
+
+	/*
+	 * At this stage no more interrupts will arrive so we may safely clean
+	 * the queueable objects here in case they failed to get cleaned so far.
+	 */
+	bnx2x_squeeze_objects(bp);
+
+	/* There should be no more pending SP commands at this stage */
+	bp->sp_state = 0;
+
+	bp->port.pmf = 0;
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+	for_each_rx_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+
+	/* Check if there are pending parity attentions. If there are - set
+	 * RECOVERY_IN_PROGRESS.
+	 */
+	if (bnx2x_chk_parity_attn(bp, &global, false)) {
+		bnx2x_set_reset_in_progress(bp);
+
+		/* Set RESET_IS_GLOBAL if needed */
+		if (global)
+			bnx2x_set_reset_global(bp);
+	}
+
+	/* The last driver must disable "close the gate" if there is no
+	 * parity attention or "process kill" pending.
+	 */
+	if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+		bnx2x_disable_close_the_gate(bp);
+
+	return 0;
+}
+
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
+{
+	u16 pmcsr;
+
+	/* If there is no power capability, silently succeed */
+	if (!bp->pm_cap) {
+		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+		return 0;
+	}
+
+	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+	switch (state) {
+	case PCI_D0:
+		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
+				       PCI_PM_CTRL_PME_STATUS));
+
+		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
+			/* delay required during transition out of D3hot */
+			msleep(20);
+		break;
+
+	case PCI_D3hot:
+		/* If there are other clients above, don't
+		   shut down the power */
+		if (atomic_read(&bp->pdev->enable_cnt) != 1)
+			return 0;
+		/* Don't shut down the power for emulation and FPGA */
+		if (CHIP_REV_IS_SLOW(bp))
+			return 0;
+
+		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+		pmcsr |= 3;
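+		/* a power state field value of 3 selects D3hot */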
+
+		if (bp->wol)
+			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
+
+		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
+				      pmcsr);
+
+		/* No more memory access after this point until
+		 * device is brought back to D0.
+		 */
+		break;
+
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * net_device service functions
+ */
+int bnx2x_poll(struct napi_struct *napi, int budget)
+{
+	int work_done = 0;
+	u8 cos;
+	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
+						 napi);
+	struct bnx2x *bp = fp->bp;
+
+	while (1) {
+#ifdef BNX2X_STOP_ON_ERROR
+		if (unlikely(bp->panic)) {
+			napi_complete(napi);
+			return 0;
+		}
+#endif
+
+		for_each_cos_in_tx_queue(fp, cos)
+			if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+				bnx2x_tx_int(bp, &fp->txdata[cos]);
+
+		if (bnx2x_has_rx_work(fp)) {
+			work_done += bnx2x_rx_int(fp, budget - work_done);
+
+			/* must not complete if we consumed full budget */
+			if (work_done >= budget)
+				break;
+		}
+
+		/* Fall out from the NAPI loop if needed */
+		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+#ifdef BCM_CNIC
+			/* No need to update SB for FCoE L2 ring as long as
+			 * it's connected to the default SB and the SB
+			 * has been updated when NAPI was scheduled.
+			 */
+			if (IS_FCOE_FP(fp)) {
+				napi_complete(napi);
+				break;
+			}
+#endif
+
+			bnx2x_update_fpsb_idx(fp);
+			/* bnx2x_has_rx_work() reads the status block,
+			 * thus we need to ensure that status block indices
+			 * have been actually read (bnx2x_update_fpsb_idx)
+			 * prior to this check (bnx2x_has_rx_work) so that
+			 * we won't write the "newer" value of the status block
+			 * to IGU (if there was a DMA right after
+			 * bnx2x_has_rx_work and if there is no rmb, the memory
+			 * reading (bnx2x_update_fpsb_idx) may be postponed
+			 * to right before bnx2x_ack_sb). In this case there
+			 * will never be another interrupt until there is
+			 * another update of the status block, while there
+			 * is still unhandled work.
+			 */
+			rmb();
+
+			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+				napi_complete(napi);
+				/* Re-enable interrupts */
+				DP(NETIF_MSG_HW,
+				   "Update index to %d\n", fp->fp_hc_idx);
+				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
+					     le16_to_cpu(fp->fp_hc_idx),
+					     IGU_INT_ENABLE, 1);
+				break;
+			}
+		}
+	}
+
+	return work_done;
+}
+
+/* We split the first BD into header and data BDs
+ * to ease the pain of our fellow microcode engineers;
+ * we use one mapping for both BDs.
+ * So far this has only been observed to happen
+ * in Other Operating Systems(TM).
+ */
+static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
+				   struct bnx2x_fp_txdata *txdata,
+				   struct sw_tx_bd *tx_buf,
+				   struct eth_tx_start_bd **tx_bd, u16 hlen,
+				   u16 bd_prod, int nbd)
+{
+	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
+	struct eth_tx_bd *d_tx_bd;
+	dma_addr_t mapping;
+	int old_len = le16_to_cpu(h_tx_bd->nbytes);
+
+	/* first fix first BD */
+	h_tx_bd->nbd = cpu_to_le16(nbd);
+	h_tx_bd->nbytes = cpu_to_le16(hlen);
+
+	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
+	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
+	   h_tx_bd->addr_lo, h_tx_bd->nbd);
+
+	/* now get a new data BD
+	 * (after the pbd) and fill it */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
+			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
+
+	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
+
+	/* this marks the BD as one that has no individual mapping */
+	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "TSO split data size is %d (%x:%x)\n",
+	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
+
+	/* update tx_bd */
+	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
+
+	return bd_prod;
+}
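+
+/* Worked example (illustrative, values assumed): for a TSO packet with
+ * hlen = 66 and a 200-byte linear part, the start BD is trimmed to 66
+ * bytes of headers and the new data BD covers the remaining 134 bytes at
+ * mapping + 66; both BDs reuse the original DMA mapping.
+ */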
+
+static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
+{
+	if (fix > 0)
+		csum = (u16) ~csum_fold(csum_sub(csum,
+				csum_partial(t_header - fix, fix, 0)));
+
+	else if (fix < 0)
+		csum = (u16) ~csum_fold(csum_add(csum,
+				csum_partial(t_header, -fix, 0)));
+
+	return swab16(csum);
+}
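+
+/* Sketch of the fixup: when the HW checksum start lies 'fix' bytes
+ * before the transport header, the partial sum over those extra bytes is
+ * subtracted back out; for a negative fix the missing bytes are added
+ * in. The result is folded and byte-swapped for the parsing BD.
+ */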
+
+static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
+{
+	u32 rc;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		rc = XMIT_PLAIN;
+
+	else {
+		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
+			rc = XMIT_CSUM_V6;
+			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+
+		} else {
+			rc = XMIT_CSUM_V4;
+			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+				rc |= XMIT_CSUM_TCP;
+		}
+	}
+
+	if (skb_is_gso_v6(skb))
+		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+	else if (skb_is_gso(skb))
+		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
+
+	return rc;
+}
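+
+/* Example outcomes (illustrative): a TSO IPv4/TCP skb yields
+ * XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP, a UDP/IPv6 skb with
+ * CHECKSUM_PARTIAL yields plain XMIT_CSUM_V6, and a non-offloaded skb
+ * maps to XMIT_PLAIN.
+ */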
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+/* check if the packet requires linearization (the packet is too
+   fragmented); no need to check fragmentation if page size > 8K (there
+   will be no violation of FW restrictions) */
+static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
+			     u32 xmit_type)
+{
+	int to_copy = 0;
+	int hlen = 0;
+	int first_bd_sz = 0;
+
+	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
+	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
+
+		if (xmit_type & XMIT_GSO) {
+			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
+			/* Check if LSO packet needs to be copied:
+			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
+			int wnd_size = MAX_FETCH_BD - 3;
+			/* Number of windows to check */
+			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
+			int wnd_idx = 0;
+			int frag_idx = 0;
+			u32 wnd_sum = 0;
+
+			/* Headers length */
+			hlen = (int)(skb_transport_header(skb) - skb->data) +
+				tcp_hdrlen(skb);
+
+			/* Amount of data (w/o headers) on linear part of SKB*/
+			first_bd_sz = skb_headlen(skb) - hlen;
+
+			wnd_sum  = first_bd_sz;
+
+			/* Calculate the first sum - it's special */
+			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
+				wnd_sum +=
+					skb_shinfo(skb)->frags[frag_idx].size;
+
+			/* If there was data on linear skb data - check it */
+			if (first_bd_sz > 0) {
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					goto exit_lbl;
+				}
+
+				wnd_sum -= first_bd_sz;
+			}
+
+			/* Others are easier: run through the frag list and
+			   check all windows */
+			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
+				wnd_sum +=
+			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
+
+				if (unlikely(wnd_sum < lso_mss)) {
+					to_copy = 1;
+					break;
+				}
+				wnd_sum -=
+					skb_shinfo(skb)->frags[wnd_idx].size;
+			}
+		} else {
+			/* in the non-LSO case a too fragmented packet
+			   should always be linearized */
+			to_copy = 1;
+		}
+	}
+
+exit_lbl:
+	if (unlikely(to_copy))
+		DP(NETIF_MSG_TX_QUEUED,
+		   "Linearization IS REQUIRED for %s packet. "
+		   "num_frags %d  hlen %d  first_bd_sz %d\n",
+		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
+		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
+
+	return to_copy;
+}
+#endif
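+
+/* Window check sketch (illustrative; assumes the usual MAX_FETCH_BD of
+ * 13): the window size is MAX_FETCH_BD - 3 = 10 BDs, and every run of 10
+ * consecutive BDs must carry at least gso_size bytes; otherwise the FW
+ * could fetch a window holding less than one MSS, so the skb gets
+ * linearized instead.
+ */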
+
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+					u32 xmit_type)
+{
+	*parsing_data |= (skb_shinfo(skb)->gso_size <<
+			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+			      ETH_TX_PARSE_BD_E2_LSO_MSS;
+	if ((xmit_type & XMIT_GSO_V6) &&
+	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
+		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+}
+
+/**
+ * bnx2x_set_pbd_gso - update PBD in GSO case.
+ *
+ * @skb:	packet skb
+ * @pbd:	parse BD
+ * @xmit_type:	xmit flags
+ */
+static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
+				     struct eth_tx_parse_bd_e1x *pbd,
+				     u32 xmit_type)
+{
+	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+	pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
+	pbd->tcp_flags = pbd_tcp_flags(skb);
+
+	if (xmit_type & XMIT_GSO_V4) {
+		pbd->ip_id = swab16(ip_hdr(skb)->id);
+		pbd->tcp_pseudo_csum =
+			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+						  ip_hdr(skb)->daddr,
+						  0, IPPROTO_TCP, 0));
+
+	} else
+		pbd->tcp_pseudo_csum =
+			swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						&ipv6_hdr(skb)->daddr,
+						0, IPPROTO_TCP, 0));
+
+	pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
+}
+
+/**
+ * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
+ *
+ * @bp:			driver handle
+ * @skb:		packet skb
+ * @parsing_data:	data to be updated
+ * @xmit_type:		xmit flags
+ *
+ * 57712 related
+ */
+static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
+	u32 *parsing_data, u32 xmit_type)
+{
+	*parsing_data |=
+			((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
+			ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+			ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
+
+	if (xmit_type & XMIT_CSUM_TCP) {
+		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
+
+		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
+	} else
+		/* We support checksum offload for TCP and UDP only.
+		 * No need to pass the UDP header length - it's a constant.
+		 */
+		return skb_transport_header(skb) +
+				sizeof(struct udphdr) - skb->data;
+}
+
+static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
+{
+	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
+
+	if (xmit_type & XMIT_CSUM_V4)
+		tx_start_bd->bd_flags.as_bitfield |=
+					ETH_TX_BD_FLAGS_IP_CSUM;
+	else
+		tx_start_bd->bd_flags.as_bitfield |=
+					ETH_TX_BD_FLAGS_IPV6;
+
+	if (!(xmit_type & XMIT_CSUM_TCP))
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
+}
+
+/**
+ * bnx2x_set_pbd_csum - update PBD with checksum and return header length
+ *
+ * @bp:		driver handle
+ * @skb:	packet skb
+ * @pbd:	parse BD to be updated
+ * @xmit_type:	xmit flags
+ */
+static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
+	struct eth_tx_parse_bd_e1x *pbd,
+	u32 xmit_type)
+{
+	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
+
+	/* for now NS flag is not used in Linux */
+	pbd->global_data =
+		(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
+			 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
+
+	pbd->ip_hlen_w = (skb_transport_header(skb) -
+			skb_network_header(skb)) >> 1;
+
+	hlen += pbd->ip_hlen_w;
+
+	/* We support checksum offload for TCP and UDP only */
+	if (xmit_type & XMIT_CSUM_TCP)
+		hlen += tcp_hdrlen(skb) / 2;
+	else
+		hlen += sizeof(struct udphdr) / 2;
+
+	pbd->total_hlen_w = cpu_to_le16(hlen);
+	hlen = hlen*2;
+
+	if (xmit_type & XMIT_CSUM_TCP) {
+		pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
+
+	} else {
+		s8 fix = SKB_CS_OFF(skb); /* signed! */
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "hlen %d  fix %d  csum before fix %x\n",
+		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
+
+		/* HW bug: fixup the CSUM */
+		pbd->tcp_pseudo_csum =
+			bnx2x_csum_fix(skb_transport_header(skb),
+				       SKB_CS(skb), fix);
+
+		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
+		   pbd->tcp_pseudo_csum);
+	}
+
+	return hlen;
+}
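+
+/* Worked example (illustrative): for a plain Ethernet + IPv4 + TCP frame
+ * with 20-byte IP and TCP headers, hlen accumulates 14/2 + 20/2 + 20/2 =
+ * 27 words in total_hlen_w, and the function returns 54 bytes after the
+ * final hlen * 2.
+ */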
+
+/* called with netif_tx_lock
+ * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
+ * netif_wake_queue()
+ */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	struct bnx2x_fastpath *fp;
+	struct netdev_queue *txq;
+	struct bnx2x_fp_txdata *txdata;
+	struct sw_tx_bd *tx_buf;
+	struct eth_tx_start_bd *tx_start_bd, *first_bd;
+	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
+	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
+	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+	u32 pbd_e2_parsing_data = 0;
+	u16 pkt_prod, bd_prod;
+	int nbd, txq_index, fp_index, txdata_index;
+	dma_addr_t mapping;
+	u32 xmit_type = bnx2x_xmit_type(bp, skb);
+	int i;
+	u8 hlen = 0;
+	__le16 pkt_size = 0;
+	struct ethhdr *eth;
+	u8 mac_type = UNICAST_ADDRESS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return NETDEV_TX_BUSY;
+#endif
+
+	txq_index = skb_get_queue_mapping(skb);
+	txq = netdev_get_tx_queue(dev, txq_index);
+
+	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
+
+	/* decode the fastpath index and the cos index from the txq */
+	fp_index = TXQ_TO_FP(txq_index);
+	txdata_index = TXQ_TO_COS(txq_index);
+
+#ifdef BCM_CNIC
+	/*
+	 * Override the above for the FCoE queue:
+	 *   - FCoE fp entry is right after the ETH entries.
+	 *   - FCoE L2 queue uses bp->txdata[0] only.
+	 */
+	if (unlikely(!NO_FCOE(bp) && (txq_index ==
+				      bnx2x_fcoe_tx(bp, txq_index)))) {
+		fp_index = FCOE_IDX;
+		txdata_index = 0;
+	}
+#endif
+
+	/* enable this debug print to view the transmission queue being used
+	DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
+	   txq_index, fp_index, txdata_index); */
+
+	/* locate the fastpath and the txdata */
+	fp = &bp->fp[fp_index];
+	txdata = &fp->txdata[txdata_index];
+
+	/* enable this debug print to view the transmission details
+	DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
+			" tx_data ptr %p fp pointer %p",
+	   txdata->cid, fp_index, txdata_index, txdata, fp); */
+
+	if (unlikely(bnx2x_tx_avail(bp, txdata) <
+		     (skb_shinfo(skb)->nr_frags + 3))) {
+		fp->eth_q_stats.driver_xoff++;
+		netif_tx_stop_queue(txq);
+		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
+				"protocol(%x,%x) gso type %x  xmit_type %x\n",
+	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
+	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
+
+	eth = (struct ethhdr *)skb->data;
+
+	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
+	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+		if (is_broadcast_ether_addr(eth->h_dest))
+			mac_type = BROADCAST_ADDRESS;
+		else
+			mac_type = MULTICAST_ADDRESS;
+	}
+
+#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
+	/* First, check if we need to linearize the skb (due to FW
+	   restrictions). No need to check fragmentation if page size > 8K
+	   (there will be no violation of FW restrictions) */
+	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
+		/* Statistics of linearization */
+		bp->lin_cnt++;
+		if (skb_linearize(skb) != 0) {
+			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
+			   "silently dropping this SKB\n");
+			dev_kfree_skb_any(skb);
+			return NETDEV_TX_OK;
+		}
+	}
+#endif
+	/* Map skb linear data for DMA */
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
+		   "silently dropping this SKB\n");
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	/*
+	Please read carefully. First we use one BD which we mark as start,
+	then we have a parsing info BD (used for TSO or xsum),
+	and only then we have the rest of the TSO BDs.
+	(don't forget to mark the last one as last,
+	and to unmap only AFTER you write to the BD ...)
+	And above all, all pbd sizes are in words - NOT DWORDS!
+	*/
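+	/* e.g. (sketch) a two-frag TSO packet that needs a header split
+	 * produces: start BD (headers), parsing BD, split data BD and two
+	 * frag BDs; nbd starts at 2 below and is bumped for the split and
+	 * for each mapped fragment.
+	 */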
+
+	/* get the current pkt produced now - advance it just before sending
+	 * the packet, since mapping of pages may fail and cause the packet
+	 * to be dropped
+	 */
+	pkt_prod = txdata->tx_pkt_prod;
+	bd_prod = TX_BD(txdata->tx_bd_prod);
+
+	/* get a tx_buf and first BD
+	 * tx_start_bd may be changed during SPLIT,
+	 * but first_bd will always stay first
+	 */
+	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+	first_bd = tx_start_bd;
+
+	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
+		 mac_type);
+
+	/* header nbd */
+	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
+
+	/* remember the first BD of the packet */
+	tx_buf->first_bd = txdata->tx_bd_prod;
+	tx_buf->skb = skb;
+	tx_buf->flags = 0;
+
+	DP(NETIF_MSG_TX_QUEUED,
+	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
+	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
+
+	if (vlan_tx_tag_present(skb)) {
+		tx_start_bd->vlan_or_ethertype =
+		    cpu_to_le16(vlan_tx_tag_get(skb));
+		tx_start_bd->bd_flags.as_bitfield |=
+		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+	} else
+		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+
+	/* turn on parsing and get a BD */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	if (xmit_type & XMIT_CSUM)
+		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
+
+	if (!CHIP_IS_E1x(bp)) {
+		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+		/* Set PBD in checksum offload case */
+		if (xmit_type & XMIT_CSUM)
+			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+						     &pbd_e2_parsing_data,
+						     xmit_type);
+		if (IS_MF_SI(bp)) {
+			/*
+			 * fill in the MAC addresses in the PBD - for local
+			 * switching
+			 */
+			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
+					      &pbd_e2->src_mac_addr_mid,
+					      &pbd_e2->src_mac_addr_lo,
+					      eth->h_source);
+			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
+					      &pbd_e2->dst_mac_addr_mid,
+					      &pbd_e2->dst_mac_addr_lo,
+					      eth->h_dest);
+		}
+	} else {
+		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+		/* Set PBD in checksum offload case */
+		if (xmit_type & XMIT_CSUM)
+			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
+
+	}
+
+	/* Setup the data pointer of the first BD of the packet */
+	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
+	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+	pkt_size = tx_start_bd->nbytes;
+
+	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
+	   "  nbytes %d  flags %x  vlan %x\n",
+	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
+	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
+	   tx_start_bd->bd_flags.as_bitfield,
+	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
+
+	if (xmit_type & XMIT_GSO) {
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
+		   skb->len, hlen, skb_headlen(skb),
+		   skb_shinfo(skb)->gso_size);
+
+		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
+
+		if (unlikely(skb_headlen(skb) > hlen))
+			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
+						 &tx_start_bd, hlen,
+						 bd_prod, ++nbd);
+		if (!CHIP_IS_E1x(bp))
+			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+					     xmit_type);
+		else
+			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
+	}
+
+	/* Set the PBD's parsing_data field if not zero
+	 * (for the chips newer than 57711).
+	 */
+	if (pbd_e2_parsing_data)
+		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
+	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
+
+	/* Handle fragmented skb */
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset, frag->size,
+				       DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+
+			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
+						"dropping packet...\n");
+
+			/* we need to unmap all buffers already mapped
+			 * for this SKB;
+			 * first_bd->nbd needs to be properly updated
+			 * before the call to bnx2x_free_tx_pkt
+			 */
+			first_bd->nbd = cpu_to_le16(nbd);
+			bnx2x_free_tx_pkt(bp, txdata,
+					  TX_BD(txdata->tx_pkt_prod));
+			return NETDEV_TX_OK;
+		}
+
+		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+		if (total_pkt_bd == NULL)
+			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
+
+		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+		tx_data_bd->nbytes = cpu_to_le16(frag->size);
+		le16_add_cpu(&pkt_size, frag->size);
+		nbd++;
+
+		DP(NETIF_MSG_TX_QUEUED,
+		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
+		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
+		   le16_to_cpu(tx_data_bd->nbytes));
+	}
+
+	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
+
+	/* update with actual num BDs */
+	first_bd->nbd = cpu_to_le16(nbd);
+
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	/* now send a tx doorbell, counting the next BD
+	 * if the packet contains or ends with it
+	 */
+	if (TX_BD_POFF(bd_prod) < nbd)
+		nbd++;
+
+	/* total_pkt_bytes should be set on the first data BD if
+	 * it's not an LSO packet and there is more than one
+	 * data BD. In this case pkt_size is limited by an MTU value.
+	 * However we prefer to set it for an LSO packet (while we don't
+	 * have to) in order to save some CPU cycles in the non-LSO
+	 * case, which we care much more about.
+	 */
+	if (total_pkt_bd != NULL)
+		total_pkt_bd->total_pkt_bytes = pkt_size;
+
+	if (pbd_e1x)
+		DP(NETIF_MSG_TX_QUEUED,
+		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
+		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
+		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
+		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
+		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
+		    le16_to_cpu(pbd_e1x->total_hlen_w));
+	if (pbd_e2)
+		DP(NETIF_MSG_TX_QUEUED,
+		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
+		   pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
+		   pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
+		   pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
+		   pbd_e2->parsing_data);
+	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
+
+	txdata->tx_pkt_prod++;
+	/*
+	 * Make sure that the BD data is updated before updating the producer
+	 * since FW might read the BD right after the producer is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW
+	 * assumes packets must have BDs.
+	 */
+	wmb();
+
+	txdata->tx_db.data.prod += nbd;
+	barrier();
+
+	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+	mmiowb();
+
+	txdata->tx_bd_prod += nbd;
+
+	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
+		netif_tx_stop_queue(txq);
+
+		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
+		 * the ordering of the set_bit() in netif_tx_stop_queue() and
+		 * the read of fp->bd_tx_cons */
+		smp_mb();
+
+		fp->eth_q_stats.driver_xoff++;
+		if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
+			netif_tx_wake_queue(txq);
+	}
+	txdata->tx_pkt++;
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * bnx2x_setup_tc - routine to configure net_device for multi tc
+ *
+ * @netdev: net device to configure
+ * @tc: number of traffic classes to enable
+ *
+ * callback connected to the ndo_setup_tc function pointer
+ */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
+{
+	int cos, prio, count, offset;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* setup tc must be called under rtnl lock */
+	ASSERT_RTNL();
+
+	/* no traffic classes requested. aborting */
+	if (!num_tc) {
+		netdev_reset_tc(dev);
+		return 0;
+	}
+
+	/* requested to support too many traffic classes */
+	if (num_tc > bp->max_cos) {
+		DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
+				     " requested: %d. max supported is %d",
+				     num_tc, bp->max_cos);
+		return -EINVAL;
+	}
+
+	/* declare amount of supported traffic classes */
+	if (netdev_set_num_tc(dev, num_tc)) {
+		DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
+				     num_tc);
+		return -EINVAL;
+	}
+
+	/* configure priority to traffic class mapping */
+	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
+		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
+		   prio, bp->prio_to_cos[prio]);
+	}
+
+	/* Use this configuration to differentiate tc0 from other COSes.
+	   This can be used for ETS or PFC, and save the effort of setting
+	   up a multi-class queue disc or negotiating DCBX with a switch
+	netdev_set_prio_tc_map(dev, 0, 0);
+	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
+	for (prio = 1; prio < 16; prio++) {
+		netdev_set_prio_tc_map(dev, prio, 1);
+		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
+	} */
+
+	/* configure traffic class to transmission queue mapping */
+	for (cos = 0; cos < bp->max_cos; cos++) {
+		count = BNX2X_NUM_ETH_QUEUES(bp);
+		offset = cos * MAX_TXQS_PER_COS;
+		netdev_set_tc_queue(dev, cos, count, offset);
+		DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
+		   cos, offset, count);
+	}
+
+	return 0;
+}
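+
+/* Mapping sketch (illustrative, values assumed): with 4 ETH queues and a
+ * MAX_TXQS_PER_COS of 16, tc0 covers txq 0..3 at offset 0 and tc1 covers
+ * txq 16..19 at offset 16, while skb priorities reach a tc through
+ * bp->prio_to_cos[].
+ */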
+
+/* called with rtnl_lock */
+int bnx2x_change_mac_addr(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+		return -EINVAL;
+
+	if (netif_running(dev))  {
+		rc = bnx2x_set_eth_mac(bp, false);
+		if (rc)
+			return rc;
+	}
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	if (netif_running(dev))
+		rc = bnx2x_set_eth_mac(bp, true);
+
+	return rc;
+}
+
+static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
+{
+	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
+	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
+	u8 cos;
+
+	/* Common */
+#ifdef BCM_CNIC
+	if (IS_FCOE_IDX(fp_index)) {
+		memset(sb, 0, sizeof(union host_hc_status_block));
+		fp->status_blk_mapping = 0;
+
+	} else {
+#endif
+		/* status blocks */
+		if (!CHIP_IS_E1x(bp))
+			BNX2X_PCI_FREE(sb->e2_sb,
+				       bnx2x_fp(bp, fp_index,
+						status_blk_mapping),
+				       sizeof(struct host_hc_status_block_e2));
+		else
+			BNX2X_PCI_FREE(sb->e1x_sb,
+				       bnx2x_fp(bp, fp_index,
+						status_blk_mapping),
+				       sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+	}
+#endif
+	/* Rx */
+	if (!skip_rx_queue(bp, fp_index)) {
+		bnx2x_free_rx_bds(fp);
+
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
+			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
+			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
+			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
+			       sizeof(struct eth_fast_path_rx_cqe) *
+			       NUM_RCQ_BD);
+
+		/* SGE ring */
+		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
+		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
+			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
+			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+	}
+
+	/* Tx */
+	if (!skip_tx_queue(bp, fp_index)) {
+		/* fastpath tx rings: tx_buf tx_desc */
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+			DP(BNX2X_MSG_SP,
+			   "freeing tx memory of fp %d cos %d cid %d",
+			   fp_index, cos, txdata->cid);
+
+			BNX2X_FREE(txdata->tx_buf_ring);
+			BNX2X_PCI_FREE(txdata->tx_desc_ring,
+				txdata->tx_desc_mapping,
+				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+		}
+	}
+	/* end of fastpath */
+}
+
+void bnx2x_free_fp_mem(struct bnx2x *bp)
+{
+	int i;
+	for_each_queue(bp, i)
+		bnx2x_free_fp_mem_at(bp, i);
+}
+
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
+{
+	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
+	if (!CHIP_IS_E1x(bp)) {
+		bnx2x_fp(bp, index, sb_index_values) =
+			(__le16 *)status_blk.e2_sb->sb.index_values;
+		bnx2x_fp(bp, index, sb_running_index) =
+			(__le16 *)status_blk.e2_sb->sb.running_index;
+	} else {
+		bnx2x_fp(bp, index, sb_index_values) =
+			(__le16 *)status_blk.e1x_sb->sb.index_values;
+		bnx2x_fp(bp, index, sb_running_index) =
+			(__le16 *)status_blk.e1x_sb->sb.running_index;
+	}
+}
+
+static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
+{
+	union host_hc_status_block *sb;
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	int ring_size = 0;
+	u8 cos;
+
+	/* if rx_ring_size specified - use it */
+	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
+			   MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+
+	/* allocate at least the number of buffers required by the FW */
+	rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+						    MIN_RX_SIZE_TPA,
+				  rx_ring_size);
+
+	/* Common */
+	sb = &bnx2x_fp(bp, index, status_blk);
+#ifdef BCM_CNIC
+	if (!IS_FCOE_IDX(index)) {
+#endif
+		/* status blocks */
+		if (!CHIP_IS_E1x(bp))
+			BNX2X_PCI_ALLOC(sb->e2_sb,
+				&bnx2x_fp(bp, index, status_blk_mapping),
+				sizeof(struct host_hc_status_block_e2));
+		else
+			BNX2X_PCI_ALLOC(sb->e1x_sb,
+				&bnx2x_fp(bp, index, status_blk_mapping),
+				sizeof(struct host_hc_status_block_e1x));
+#ifdef BCM_CNIC
+	}
+#endif
+
+	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
+	 * set shortcuts for it.
+	 */
+	if (!IS_FCOE_IDX(index))
+		set_sb_shortcuts(bp, index);
+
+	/* Tx */
+	if (!skip_tx_queue(bp, index)) {
+		/* fastpath tx rings: tx_buf tx_desc */
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+			DP(BNX2X_MSG_SP, "allocating tx memory of "
+					 "fp %d cos %d",
+			   index, cos);
+
+			BNX2X_ALLOC(txdata->tx_buf_ring,
+				sizeof(struct sw_tx_bd) * NUM_TX_BD);
+			BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
+				&txdata->tx_desc_mapping,
+				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+		}
+	}
+
+	/* Rx */
+	if (!skip_rx_queue(bp, index)) {
+		/* fastpath rx rings: rx_buf rx_desc rx_comp */
+		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
+				sizeof(struct sw_rx_bd) * NUM_RX_BD);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
+				&bnx2x_fp(bp, index, rx_desc_mapping),
+				sizeof(struct eth_rx_bd) * NUM_RX_BD);
+
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
+				&bnx2x_fp(bp, index, rx_comp_mapping),
+				sizeof(struct eth_fast_path_rx_cqe) *
+				NUM_RCQ_BD);
+
+		/* SGE ring */
+		BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
+				sizeof(struct sw_rx_page) * NUM_RX_SGE);
+		BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
+				&bnx2x_fp(bp, index, rx_sge_mapping),
+				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+		/* RX BD ring */
+		bnx2x_set_next_page_rx_bd(fp);
+
+		/* CQ ring */
+		bnx2x_set_next_page_rx_cq(fp);
+
+		/* BDs */
+		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
+		if (ring_size < rx_ring_size)
+			goto alloc_mem_err;
+	}
+
+	return 0;
+
+/* handles low memory cases */
+alloc_mem_err:
+	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
+		  index, ring_size);
+	/* FW will drop all packets if the queue is not big enough.
+	 * In that case we disable the queue.
+	 * The minimum size differs for OOO, TPA and non-TPA queues.
+	 */
+	if (ring_size < (fp->disable_tpa ?
+				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
+		/* release memory allocated for this queue */
+		bnx2x_free_fp_mem_at(bp, index);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+int bnx2x_alloc_fp_mem(struct bnx2x *bp)
+{
+	int i;
+
+	/*
+	 * 1. Allocate FP for leading - fatal if error
+	 * 2. {CNIC} Allocate FCoE FP - fatal if error
+	 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
+	 * 4. Allocate RSS - fix number of queues if error
+	 */
+
+	/* leading */
+	if (bnx2x_alloc_fp_mem_at(bp, 0))
+		return -ENOMEM;
+
+#ifdef BCM_CNIC
+	if (!NO_FCOE(bp))
+		/* FCoE */
+		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
+			/* we will fail the load process instead of
+			 * marking NO_FCOE_FLAG
+			 */
+			return -ENOMEM;
+#endif
+
+	/* RSS */
+	for_each_nondefault_eth_queue(bp, i)
+		if (bnx2x_alloc_fp_mem_at(bp, i))
+			break;
+
+	/* handle memory failures */
+	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
+		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
+
+		WARN_ON(delta < 0);
+#ifdef BCM_CNIC
+		/*
+		 * Move the non-eth FPs next to the last eth FP; this
+		 * must be done in that order, since
+		 * FCOE_IDX < FWD_IDX < OOO_IDX.
+		 */
+
+		/* move the FCoE fp even if NO_FCOE_FLAG is on */
+		bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
+#endif
+		bp->num_queues -= delta;
+		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
+			  bp->num_queues + delta, bp->num_queues);
+	}
+
+	return 0;
+}
+
+void bnx2x_free_mem_bp(struct bnx2x *bp)
+{
+	kfree(bp->fp);
+	kfree(bp->msix_table);
+	kfree(bp->ilt);
+}
+
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
+{
+	struct bnx2x_fastpath *fp;
+	struct msix_entry *tbl;
+	struct bnx2x_ilt *ilt;
+	int msix_table_size = 0;
+
+	/*
+	 * The biggest MSI-X table we might need is the maximum number of fast
+	 * path IGU SBs plus the default SB (for the PF).
+	 */
+	msix_table_size = bp->igu_sb_cnt + 1;
+
+	/* fp array: RSS plus CNIC related L2 queues */
+	fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+		     sizeof(*fp), GFP_KERNEL);
+	if (!fp)
+		goto alloc_err;
+	bp->fp = fp;
+
+	/* msix table */
+	tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+	if (!tbl)
+		goto alloc_err;
+	bp->msix_table = tbl;
+
+	/* ilt */
+	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
+	if (!ilt)
+		goto alloc_err;
+	bp->ilt = ilt;
+
+	return 0;
+alloc_err:
+	bnx2x_free_mem_bp(bp);
+	return -ENOMEM;
+}
+
+int bnx2x_reload_if_running(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (unlikely(!netif_running(dev)))
+		return 0;
+
+	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+	return bnx2x_nic_load(bp, LOAD_NORMAL);
+}
+
+int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = 0;
+	if (bp->link_params.num_phys <= 1)
+		return INT_PHY;
+
+	if (bp->link_vars.link_up) {
+		sel_phy_idx = EXT_PHY1;
+		/* In case the link is SerDes, check whether EXT_PHY2 is the one */
+		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
+		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
+			sel_phy_idx = EXT_PHY2;
+	} else {
+
+		switch (bnx2x_phy_selection(&bp->link_params)) {
+		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		       sel_phy_idx = EXT_PHY1;
+		       break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		       sel_phy_idx = EXT_PHY2;
+		       break;
+		}
+	}
+
+	return sel_phy_idx;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
+{
+	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
+	/*
+	 * The selected active PHY is always the one after swapping (in case
+	 * PHY swapping is enabled). So when swapping is enabled, we need to
+	 * reverse the configuration.
+	 */
+
+	if (bp->link_params.multi_phy_config &
+	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+		if (sel_phy_idx == EXT_PHY1)
+			sel_phy_idx = EXT_PHY2;
+		else if (sel_phy_idx == EXT_PHY2)
+			sel_phy_idx = EXT_PHY1;
+	}
+	return LINK_CONFIG_IDX(sel_phy_idx);
+}
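+
+/*
+ * Example (illustrative): with PHY swapping enabled, a link that is up on
+ * EXT_PHY1 reads its configuration from the EXT_PHY2 slot, since the
+ * configuration entries are indexed before the swap.
+ */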
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	switch (type) {
+	case NETDEV_FCOE_WWNN:
+		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
+				cp->fcoe_wwn_node_name_lo);
+		break;
+	case NETDEV_FCOE_WWPN:
+		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
+				cp->fcoe_wwn_port_name_lo);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
+/* called with rtnl_lock */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
+	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+		return -EINVAL;
+
+	/* This does not race with packet allocation
+	 * because the actual alloc size is
+	 * only updated as part of load
+	 */
+	dev->mtu = new_mtu;
+
+	return bnx2x_reload_if_running(dev);
+}
+
+u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* TPA requires Rx CSUM offloading */
+	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+		features &= ~NETIF_F_LRO;
+
+	return features;
+}
+
+int bnx2x_set_features(struct net_device *dev, u32 features)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 flags = bp->flags;
+	bool bnx2x_reload = false;
+
+	if (features & NETIF_F_LRO)
+		flags |= TPA_ENABLE_FLAG;
+	else
+		flags &= ~TPA_ENABLE_FLAG;
+
+	if (features & NETIF_F_LOOPBACK) {
+		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+			bp->link_params.loopback_mode = LOOPBACK_BMAC;
+			bnx2x_reload = true;
+		}
+	} else {
+		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+			bp->link_params.loopback_mode = LOOPBACK_NONE;
+			bnx2x_reload = true;
+		}
+	}
+
+	if (flags ^ bp->flags) {
+		bp->flags = flags;
+		bnx2x_reload = true;
+	}
+
+	if (bnx2x_reload) {
+		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
+			return bnx2x_reload_if_running(dev);
+		/* else: bnx2x_nic_load() will be called at end of recovery */
+	}
+
+	return 0;
+}
+
+void bnx2x_tx_timeout(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (!bp->panic)
+		bnx2x_panic();
+#endif
+
+	smp_mb__before_clear_bit();
+	set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
+	smp_mb__after_clear_bit();
+
+	/* This allows the netif to be shut down gracefully before resetting */
+	schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return -ENODEV;
+	}
+	bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	pci_save_state(pdev);
+
+	if (!netif_running(dev)) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	netif_device_detach(dev);
+
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+int bnx2x_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+	int rc;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return -ENODEV;
+	}
+	bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	rtnl_lock();
+
+	pci_restore_state(pdev);
+
+	if (!netif_running(dev)) {
+		rtnl_unlock();
+		return 0;
+	}
+
+	bnx2x_set_power_state(bp, PCI_D0);
+	netif_device_attach(dev);
+
+	/* Since the chip was reset, clear the FW sequence number */
+	bp->fw_seq = 0;
+	rc = bnx2x_nic_load(bp, LOAD_OPEN);
+
+	rtnl_unlock();
+
+	return rc;
+}
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid)
+{
+	/* ustorm cxt validation */
+	cxt->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+	/* xstorm cxt validation */
+	cxt->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+					     u8 fw_sb_id, u8 sb_index,
+					     u8 ticks)
+{
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+	REG_WR8(bp, addr, ticks);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+			  port, fw_sb_id, sb_index, ticks);
+}
+
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+					     u16 fw_sb_id, u8 sb_index,
+					     u8 disable)
+{
+	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+	u16 flags = REG_RD16(bp, addr);
+	/* clear and set */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= enable_flag;
+	REG_WR16(bp, addr, flags);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+			  port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec)
+{
+	int port = BP_PORT(bp);
+	u8 ticks = usec / BNX2X_BTR;
+
+	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
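+	/* a usec value of 0 also disables the index, regardless of "disable" */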
+	disable = disable ? 1 : (usec ? 0 : 1);
+	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
new file mode 100644
index 0000000..223bfee
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -0,0 +1,1491 @@
+/* bnx2x_cmn.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_CMN_H
+#define BNX2X_CMN_H
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+
+
+#include "bnx2x.h"
+
+/* This is used as a replacement for an MCP if it's not present */
+extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */
+
+extern int num_queues;
+
+/************************ Macros ********************************/
+#define BNX2X_PCI_FREE(x, y, size) \
+	do { \
+		if (x) { \
+			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
+			x = NULL; \
+			y = 0; \
+		} \
+	} while (0)
+
+#define BNX2X_FREE(x) \
+	do { \
+		if (x) { \
+			kfree((void *)x); \
+			x = NULL; \
+		} \
+	} while (0)
+
+#define BNX2X_PCI_ALLOC(x, y, size) \
+	do { \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+		if (x == NULL) \
+			goto alloc_mem_err; \
+		memset((void *)x, 0, size); \
+	} while (0)
+
+#define BNX2X_ALLOC(x, size) \
+	do { \
+		x = kzalloc(size, GFP_KERNEL); \
+		if (x == NULL) \
+			goto alloc_mem_err; \
+	} while (0)
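+
+/*
+ * Usage sketch (illustrative only): the allocation macros assume that the
+ * enclosing function has a "bp" pointer in scope and provides an
+ * alloc_mem_err label, mirroring bnx2x_alloc_fp_mem_at():
+ *
+ *	BNX2X_ALLOC(fp->rx_buf_ring, sizeof(struct sw_rx_bd) * NUM_RX_BD);
+ *	BNX2X_PCI_ALLOC(fp->rx_desc_ring, &fp->rx_desc_mapping,
+ *			sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ *	return 0;
+ *
+ * alloc_mem_err:
+ *	BNX2X_FREE(fp->rx_buf_ring);
+ *	return -ENOMEM;
+ */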
+
+/*********************** Interfaces ****************************
+ *  Functions that need to be implemented by each driver version
+ */
+/* Init */
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp);
+
+/**
+ * bnx2x_config_rss_pf - configure RSS parameters.
+ *
+ * @bp:			driver handle
+ * @ind_table:		indirection table to configure
+ * @config_hash:	re-configure the RSS hash keys
+ */
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash);
+
+/**
+ * bnx2x__init_func_obj - init function object
+ *
+ * @bp:			driver handle
+ *
+ * Initializes the Function Object with the appropriate
+ * parameters which include a function slow path driver
+ * interface.
+ */
+void bnx2x__init_func_obj(struct bnx2x *bp);
+
+/**
+ * bnx2x_setup_queue - setup eth queue.
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to the fastpath structure
+ * @leading:	true if this is the leading queue
+ *
+ */
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool leading);
+
+/**
+ * bnx2x_setup_leading - bring up a leading eth queue.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_setup_leading(struct bnx2x *bp);
+
+/**
+ * bnx2x_fw_command - send the MCP a request
+ *
+ * @bp:		driver handle
+ * @command:	request
+ * @param:	request's parameter
+ *
+ * Blocks until there is a reply.
+ */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param);
+
+/**
+ * bnx2x_initial_phy_init - initialize link parameters structure variables.
+ *
+ * @bp:		driver handle
+ * @load_mode:	current mode
+ */
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);
+
+/**
+ * bnx2x_link_set - configure hw according to link parameters structure.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_link_set(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_test - query link status.
+ *
+ * @bp:		driver handle
+ * @is_serdes:	true when testing the SerDes link
+ *
+ * Returns 0 if link is UP.
+ */
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);
+
+/**
+ * bnx2x_drv_pulse - write driver pulse to shmem
+ *
+ * @bp:		driver handle
+ *
+ * writes the value in bp->fw_drv_pulse_wr_seq to drv_pulse mbox
+ * in the shmem.
+ */
+void bnx2x_drv_pulse(struct bnx2x *bp);
+
+/**
+ * bnx2x_igu_ack_sb - update IGU with current SB value
+ *
+ * @bp:		driver handle
+ * @igu_sb_id:	SB id
+ * @segment:	SB segment
+ * @index:	SB index
+ * @op:		SB operation
+ * @update:	is HW update required
+ */
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update);
+
+/* Disable transactions from chip to host */
+void bnx2x_pf_disable(struct bnx2x *bp);
+
+/**
+ * bnx2x__link_status_update - handles link status change.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x__link_status_update(struct bnx2x *bp);
+
+/**
+ * bnx2x_link_report - report link status to upper layer.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_link_report(struct bnx2x *bp);
+
+/* Non-atomic version of bnx2x_link_report() */
+void __bnx2x_link_report(struct bnx2x *bp);
+
+/**
+ * bnx2x_get_mf_speed - calculate MF speed.
+ *
+ * @bp:		driver handle
+ *
+ * Takes into account the current line speed and the MF configuration.
+ */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp);
+
+/**
+ * bnx2x_msix_sp_int - MSI-X slowpath interrupt handler
+ *
+ * @irq:		irq number
+ * @dev_instance:	private instance
+ */
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);
+
+/**
+ * bnx2x_interrupt - non MSI-X interrupt handler
+ *
+ * @irq:		irq number
+ * @dev_instance:	private instance
+ */
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);
+#ifdef BCM_CNIC
+
+/**
+ * bnx2x_cnic_notify - send command to cnic driver
+ *
+ * @bp:		driver handle
+ * @cmd:	command
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
+
+/**
+ * bnx2x_setup_cnic_irq_info - provides cnic with IRQ information
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
+#endif
+
+/**
+ * bnx2x_int_enable - enable HW interrupts.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_int_enable(struct bnx2x *bp);
+
+/**
+ * bnx2x_int_disable_sync - disable interrupts.
+ *
+ * @bp:		driver handle
+ * @disable_hw:	true, disable HW interrupts.
+ *
+ * This function ensures that no ISRs or SP DPCs
+ * (sp_task) are running after it returns.
+ */
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);
+
+/**
+ * bnx2x_nic_init - init driver internals.
+ *
+ * @bp:		driver handle
+ * @load_code:	COMMON, PORT or FUNCTION
+ *
+ * Initializes:
+ *  - rings
+ *  - status blocks
+ *  - etc.
+ */
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);
+
+/**
+ * bnx2x_alloc_mem - allocate driver's memory.
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_alloc_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem - release driver's memory.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_free_mem(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_num_queues - set number of queues according to mode.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_set_num_queues(struct bnx2x *bp);
+
+/**
+ * bnx2x_chip_cleanup - cleanup chip internals.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	COMMON, PORT, FUNCTION
+ *
+ * - Cleans up MAC configuration.
+ * - Closes clients.
+ * - etc.
+ */
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);
+
+/**
+ * bnx2x_acquire_hw_lock - acquire HW lock.
+ *
+ * @bp:		driver handle
+ * @resource:	resource bit which was locked
+ */
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_hw_lock - release HW lock.
+ *
+ * @bp:		driver handle
+ * @resource:	resource bit which was locked
+ */
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
+
+/**
+ * bnx2x_release_leader_lock - release recovery leader lock
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_release_leader_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_eth_mac - configure eth MAC address in the HW
+ *
+ * @bp:		driver handle
+ * @set:	set or clear
+ *
+ * Configures according to the value in netdev->dev_addr.
+ */
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set);
+
+/**
+ * bnx2x_set_rx_mode - set MAC filtering configurations.
+ *
+ * @dev:	netdevice
+ *
+ * called with netif_tx_lock from dev_mcast.c
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh()
+ */
+void bnx2x_set_rx_mode(struct net_device *dev);
+
+/**
+ * bnx2x_set_storm_rx_mode - configure MAC filtering rules in the FW.
+ *
+ * @bp:		driver handle
+ *
+ * If bp->state is OPEN, should be called with
+ * netif_addr_lock_bh().
+ */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_q_rx_mode - configures rx_mode for a single queue.
+ *
+ * @bp:			driver handle
+ * @cl_id:		client id
+ * @rx_mode_flags:	rx mode configuration
+ * @rx_accept_flags:	rx accept configuration
+ * @tx_accept_flags:	tx accept configuration (tx switch)
+ * @ramrod_flags:	ramrod configuration
+ */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			 unsigned long rx_mode_flags,
+			 unsigned long rx_accept_flags,
+			 unsigned long tx_accept_flags,
+			 unsigned long ramrod_flags);
+
+/* Parity errors related */
+void bnx2x_inc_load_cnt(struct bnx2x *bp);
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
+void bnx2x_set_reset_in_progress(struct bnx2x *bp);
+void bnx2x_set_reset_global(struct bnx2x *bp);
+void bnx2x_disable_close_the_gate(struct bnx2x *bp);
+
+/**
+ * bnx2x_sp_event - handle ramrods completion.
+ *
+ * @fp:		fastpath handle for the event
+ * @rr_cqe:	eth_rx_cqe
+ */
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);
+
+/**
+ * bnx2x_ilt_set_info - prepare ILT configurations.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_ilt_set_info(struct bnx2x *bp);
+
+/**
+ * bnx2x_dcbx_init - initialize dcbx protocol.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_dcbx_init(struct bnx2x *bp);
+
+/**
+ * bnx2x_set_power_state - set power state to the requested value.
+ *
+ * @bp:		driver handle
+ * @state:	required state D0 or D3hot
+ *
+ * Currently only D0 and D3hot are supported.
+ */
+int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
+
+/**
+ * bnx2x_update_max_mf_config - update MAX part of MF configuration in HW.
+ *
+ * @bp:		driver handle
+ * @value:	new value
+ */
+void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
+/* Error handling */
+void bnx2x_panic_dump(struct bnx2x *bp);
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl);
+
+/* dev_close main block */
+int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+
+/* dev_open main block */
+int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+/* hard_xmit callback */
+netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
+
+/* setup_tc callback */
+int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
+
+/* select_queue callback */
+u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
+
+/* reload helper */
+int bnx2x_reload_if_running(struct net_device *dev);
+
+int bnx2x_change_mac_addr(struct net_device *dev, void *p);
+
+/* NAPI poll Rx part */
+int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
+
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
+
+/* NAPI poll Tx part */
+int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
+
+/* suspend/resume callbacks */
+int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
+int bnx2x_resume(struct pci_dev *pdev);
+
+/* Release IRQ vectors */
+void bnx2x_free_irq(struct bnx2x *bp);
+
+void bnx2x_free_fp_mem(struct bnx2x *bp);
+int bnx2x_alloc_fp_mem(struct bnx2x *bp);
+void bnx2x_init_rx_rings(struct bnx2x *bp);
+void bnx2x_free_skbs(struct bnx2x *bp);
+void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
+void bnx2x_netif_start(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msix - set msix configuration.
+ *
+ * @bp:		driver handle
+ *
+ * fills msix_table, requests vectors, updates num_queues
+ * according to number of available vectors.
+ */
+int bnx2x_enable_msix(struct bnx2x *bp);
+
+/**
+ * bnx2x_enable_msi - request MSI mode from the OS, update internals accordingly
+ *
+ * @bp:		driver handle
+ */
+int bnx2x_enable_msi(struct bnx2x *bp);
+
+/**
+ * bnx2x_poll - NAPI callback
+ *
+ * @napi:	napi structure
+ * @budget:	NAPI budget (maximum number of packets to process)
+ *
+ */
+int bnx2x_poll(struct napi_struct *napi, int budget);
+
+/**
+ * bnx2x_alloc_mem_bp - allocate memory outside the main driver structure
+ *
+ * @bp:		driver handle
+ */
+int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_free_mem_bp - release memory outside the main driver structure
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_free_mem_bp(struct bnx2x *bp);
+
+/**
+ * bnx2x_change_mtu - change mtu netdev callback
+ *
+ * @dev:	net device
+ * @new_mtu:	requested mtu
+ *
+ */
+int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
+
+#if defined(BCM_CNIC) && (defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE))
+/**
+ * bnx2x_fcoe_get_wwn - return the requested WWN value for this port
+ *
+ * @dev:	net_device
+ * @wwn:	output buffer
+ * @type:	WWN type: NETDEV_FCOE_WWNN (node) or NETDEV_FCOE_WWPN (port)
+ *
+ */
+int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
+#endif
+u32 bnx2x_fix_features(struct net_device *dev, u32 features);
+int bnx2x_set_features(struct net_device *dev, u32 features);
+
+/**
+ * bnx2x_tx_timeout - tx timeout netdev callback
+ *
+ * @dev:	net device
+ */
+void bnx2x_tx_timeout(struct net_device *dev);
+
+/*********************** Inlines **********************************/
+/*********************** Fast path ********************************/
+static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
+{
+	barrier(); /* status block is written to by the chip */
+	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
+}
+
+static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
+			struct bnx2x_fastpath *fp, u16 bd_prod,
+			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
+{
+	struct ustorm_eth_rx_producers rx_prods = {0};
+	u32 i;
+
+	/* Update producers */
+	rx_prods.bd_prod = bd_prod;
+	rx_prods.cqe_prod = rx_comp_prod;
+	rx_prods.sge_prod = rx_sge_prod;
+
+	/*
+	 * Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since the FW
+	 * assumes BDs must have buffers.
+	 */
+	wmb();
+
+	for (i = 0; i < sizeof(rx_prods)/4; i++)
+		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
+
+	mmiowb(); /* keep prod updates ordered */
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
+	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
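+
+/*
+ * Note (illustrative): the producers are pushed as raw u32 words because
+ * the destination is ustorm internal memory reached via REG_WR; "start" is
+ * the queue's Rx producers offset within the BAR (see
+ * bnx2x_rx_ustorm_prods_offset() further below).
+ */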
+
+static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
+					u8 segment, u16 index, u8 op,
+					u8 update, u32 igu_addr)
+{
+	struct igu_regular cmd_data = {0};
+
+	cmd_data.sb_id_and_flags =
+			((index << IGU_REGULAR_SB_INDEX_SHIFT) |
+			 (segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+			 (update << IGU_REGULAR_BUPDATE_SHIFT) |
+			 (op << IGU_REGULAR_ENABLE_INT_SHIFT));
+
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
+	   cmd_data.sb_id_and_flags, igu_addr);
+	REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
+
+	/* Make sure that ACK is written */
+	mmiowb();
+	barrier();
+}
+
+static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
+					  u8 idu_sb_id, bool is_Pf)
+{
+	u32 data, ctl, cnt = 100;
+	u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
+	u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
+	u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
+	u32 sb_bit =  1 << (idu_sb_id%32);
+	u32 func_encode = func |
+			((is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
+	u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
+
+	/* Not supported in BC mode */
+	if (CHIP_INT_MODE_IS_BC(bp))
+		return;
+
+	data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
+			<< IGU_REGULAR_CLEANUP_TYPE_SHIFT)	|
+		IGU_REGULAR_CLEANUP_SET				|
+		IGU_REGULAR_BCLEANUP;
+
+	ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT		|
+	      func_encode << IGU_CTRL_REG_FID_SHIFT		|
+	      IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
+
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			 data, igu_addr_data);
+	REG_WR(bp, igu_addr_data, data);
+	mmiowb();
+	barrier();
+	DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
+			  ctl, igu_addr_ctl);
+	REG_WR(bp, igu_addr_ctl, ctl);
+	mmiowb();
+	barrier();
+
+	/* wait for clean up to finish */
+	while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
+		msleep(20);
+
+	if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
+		DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
+			  "idu_sb_id %d offset %d bit %d (cnt %d)\n",
+			  idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
+	}
+}
+
+static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
+				   u8 storm, u16 index, u8 op, u8 update)
+{
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_INT_ACK);
+	struct igu_ack_register igu_ack;
+
+	igu_ack.status_block_index = index;
+	igu_ack.sb_id_and_flags =
+			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
+	   (*(u32 *)&igu_ack), hc_addr);
+	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
+
+	/* Make sure that ACK is written */
+	mmiowb();
+	barrier();
+}
+
+static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 storm,
+				u16 index, u8 op, u8 update)
+{
+	if (bp->common.int_block == INT_BLOCK_HC)
+		bnx2x_hc_ack_sb(bp, igu_sb_id, storm, index, op, update);
+	else {
+		u8 segment;
+
+		if (CHIP_INT_MODE_IS_BC(bp))
+			segment = storm;
+		else if (igu_sb_id != bp->igu_dsb_id)
+			segment = IGU_SEG_ACCESS_DEF;
+		else if (storm == ATTENTION_ID)
+			segment = IGU_SEG_ACCESS_ATTN;
+		else
+			segment = IGU_SEG_ACCESS_DEF;
+		bnx2x_igu_ack_sb(bp, igu_sb_id, segment, index, op, update);
+	}
+}
+
+static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
+{
+	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
+		       COMMAND_REG_SIMD_MASK);
+	u32 result = REG_RD(bp, hc_addr);
+
+	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
+	   result, hc_addr);
+
+	barrier();
+	return result;
+}
+
+static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
+{
+	u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
+	u32 result = REG_RD(bp, igu_addr);
+
+	DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
+	   result, igu_addr);
+
+	barrier();
+	return result;
+}
+
+static inline u16 bnx2x_ack_int(struct bnx2x *bp)
+{
+	barrier();
+	if (bp->common.int_block == INT_BLOCK_HC)
+		return bnx2x_hc_ack_int(bp);
+	else
+		return bnx2x_igu_ack_int(bp);
+}
+
+static inline int bnx2x_has_tx_work_unload(struct bnx2x_fp_txdata *txdata)
+{
+	/* Tell compiler that consumer and producer can change */
+	barrier();
+	return txdata->tx_pkt_prod != txdata->tx_pkt_cons;
+}
+
+static inline u16 bnx2x_tx_avail(struct bnx2x *bp,
+				 struct bnx2x_fp_txdata *txdata)
+{
+	s16 used;
+	u16 prod;
+	u16 cons;
+
+	prod = txdata->tx_bd_prod;
+	cons = txdata->tx_bd_cons;
+
+	/* NUM_TX_RINGS = number of "next-page" entries;
+	 * it is used as a threshold.
+	 */
+	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	WARN_ON(used < 0);
+	WARN_ON(used > bp->tx_ring_size);
+	WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL);
+#endif
+
+	return (s16)(bp->tx_ring_size) - used;
+}
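+
+/*
+ * Worked example (illustrative): with prod == cons the ring carries no
+ * packets, yet "used" already equals NUM_TX_RINGS because one "next-page"
+ * BD per page can never hold data; the value returned is therefore
+ * tx_ring_size - NUM_TX_RINGS rather than tx_ring_size.
+ */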
+
+static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata)
+{
+	u16 hw_cons;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
+	return hw_cons != txdata->tx_pkt_cons;
+}
+
+static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
+{
+	u8 cos;
+	for_each_cos_in_tx_queue(fp, cos)
+		if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
+			return true;
+	return false;
+}
+
+static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
+{
+	u16 rx_cons_sb;
+
+	/* Tell compiler that status block fields can change */
+	barrier();
+	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
+	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
+		rx_cons_sb++;
+	return (fp->rx_comp_cons != rx_cons_sb);
+}
+
+/**
+ * bnx2x_tx_disable - disable tx from the stack's point of view
+ *
+ * @bp:		driver handle
+ */
+static inline void bnx2x_tx_disable(struct bnx2x *bp)
+{
+	netif_tx_disable(bp->dev);
+	netif_carrier_off(bp->dev);
+}
+
+static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct page *page = sw_buf->page;
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+
+	/* Skip "next page" elements */
+	if (!page)
+		return;
+
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+	__free_pages(page, PAGES_PER_SGE_SHIFT);
+
+	sw_buf->page = NULL;
+	sge->addr_hi = 0;
+	sge->addr_lo = 0;
+}
+
+static inline void bnx2x_add_all_napi(struct bnx2x *bp)
+{
+	int i;
+
+	/* Add NAPI objects */
+	for_each_rx_queue(bp, i)
+		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
+			       bnx2x_poll, BNX2X_NAPI_WEIGHT);
+}
+
+static inline void bnx2x_del_all_napi(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_rx_queue(bp, i)
+		netif_napi_del(&bnx2x_fp(bp, i, napi));
+}
+
+static inline void bnx2x_disable_msi(struct bnx2x *bp)
+{
+	if (bp->flags & USING_MSIX_FLAG) {
+		pci_disable_msix(bp->pdev);
+		bp->flags &= ~USING_MSIX_FLAG;
+	} else if (bp->flags & USING_MSI_FLAG) {
+		pci_disable_msi(bp->pdev);
+		bp->flags &= ~USING_MSI_FLAG;
+	}
+}
+
+static inline int bnx2x_calc_num_queues(struct bnx2x *bp)
+{
+	return  num_queues ?
+		 min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) :
+		 min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
+}
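+
+/*
+ * Example (illustrative): with the num_queues module parameter left at 0,
+ * a host with 8 online CPUs gets min(8, BNX2X_MAX_QUEUES(bp)) queues;
+ * loading the driver with num_queues=2 caps the count at 2.
+ */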
+
+static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
+{
+	int i, j;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		int idx = RX_SGE_CNT * i - 1;
+
+		for (j = 0; j < 2; j++) {
+			BIT_VEC64_CLEAR_BIT(fp->sge_mask, idx);
+			idx--;
+		}
+	}
+}
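+
+/*
+ * Example (illustrative): for SGE page i (1-based), the two indices that
+ * hold the "next page" element, RX_SGE_CNT * i - 1 and RX_SGE_CNT * i - 2,
+ * are cleared in the mask so they are never counted as completed SGEs.
+ */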
+
+static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
+{
+	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
+	memset(fp->sge_mask, 0xff,
+	       (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+
+	/* Clear the last two indices in each page:
+	 * these are the indices that correspond to the "next" element,
+	 * hence they will never be indicated and should be removed from
+	 * the calculations.
+	 */
+	bnx2x_clear_sge_mask_next_elems(fp);
+}
+
+static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
+	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
+	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+	dma_addr_t mapping;
+
+	if (unlikely(page == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		__free_pages(page, PAGES_PER_SGE_SHIFT);
+		return -ENOMEM;
+	}
+
+	sw_buf->page = page;
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
+
+	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
+	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
+static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
+				     struct bnx2x_fastpath *fp, u16 index)
+{
+	struct sk_buff *skb;
+	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
+	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
+	dma_addr_t mapping;
+
+	skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+	if (unlikely(skb == NULL))
+		return -ENOMEM;
+
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+				 DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		dev_kfree_skb_any(skb);
+		return -ENOMEM;
+	}
+
+	rx_buf->skb = skb;
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
+
+	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+
+	return 0;
+}
+
+/* Note that we are not allocating a new skb,
+ * we are just moving one from cons to prod.
+ * We are not creating a new mapping,
+ * so there is no need to check for dma_mapping_error().
+ */
+static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+				      u16 cons, u16 prod)
+{
+	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
+	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
+	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
+	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
+
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
+	prod_rx_buf->skb = cons_rx_buf->skb;
+	*prod_bd = *cons_bd;
+}
+
+/************************* Init ******************************************/
+
+/**
+ * bnx2x_func_start - init function
+ *
+ * @bp:		driver handle
+ *
+ * Must be called before sending CLIENT_SETUP for the first client.
+ */
+static inline int bnx2x_func_start(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_start_params *start_params =
+		&func_params.params.start;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_START;
+
+	/* Function parameters */
+	start_params->mf_mode = bp->mf_mode;
+	start_params->sd_vlan_tag = bp->mf_ov;
+	if (CHIP_IS_E1x(bp))
+		start_params->network_cos_mode = OVERRIDE_COS;
+	else
+		start_params->network_cos_mode = STATIC_COS;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+
+/**
+ * bnx2x_set_fw_mac_addr - fill in a MAC address in FW format
+ *
+ * @fw_hi:	pointer to upper part
+ * @fw_mid:	pointer to middle part
+ * @fw_lo:	pointer to lower part
+ * @mac:	pointer to MAC address
+ */
+static inline void bnx2x_set_fw_mac_addr(u16 *fw_hi, u16 *fw_mid, u16 *fw_lo,
+					 u8 *mac)
+{
+	((u8 *)fw_hi)[0]  = mac[1];
+	((u8 *)fw_hi)[1]  = mac[0];
+	((u8 *)fw_mid)[0] = mac[3];
+	((u8 *)fw_mid)[1] = mac[2];
+	((u8 *)fw_lo)[0]  = mac[5];
+	((u8 *)fw_lo)[1]  = mac[4];
+}
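+
+/*
+ * Worked example (illustrative): for mac == 00:11:22:33:44:55 on a
+ * little-endian host this yields fw_hi = 0x0011, fw_mid = 0x2233 and
+ * fw_lo = 0x4455, i.e. the address packed big-endian into three u16s.
+ */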
+
+static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
+					   struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	if (fp->disable_tpa)
+		return;
+
+	for (i = 0; i < last; i++)
+		bnx2x_free_rx_sge(bp, fp, i);
+}
+
+static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
+				       struct bnx2x_fastpath *fp, int last)
+{
+	int i;
+
+	for (i = 0; i < last; i++) {
+		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
+		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
+		struct sk_buff *skb = first_buf->skb;
+
+		if (skb == NULL) {
+			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
+			continue;
+		}
+		if (tpa_info->tpa_state == BNX2X_TPA_START)
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(first_buf, mapping),
+					 fp->rx_buf_size, DMA_FROM_DEVICE);
+		dev_kfree_skb(skb);
+		first_buf->skb = NULL;
+	}
+}
+
+static inline void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
+{
+	int i;
+
+	for (i = 1; i <= NUM_TX_RINGS; i++) {
+		struct eth_tx_next_bd *tx_next_bd =
+			&txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
+
+		tx_next_bd->addr_hi =
+			cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+		tx_next_bd->addr_lo =
+			cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
+	}
+
+	SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
+	txdata->tx_db.data.zero_fill1 = 0;
+	txdata->tx_db.data.prod = 0;
+
+	txdata->tx_pkt_prod = 0;
+	txdata->tx_pkt_cons = 0;
+	txdata->tx_bd_prod = 0;
+	txdata->tx_bd_cons = 0;
+	txdata->tx_pkt = 0;
+}
+
+static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
+{
+	int i;
+	u8 cos;
+
+	for_each_tx_queue(bp, i)
+		for_each_cos_in_tx_queue(&bp->fp[i], cos)
+			bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]);
+}
+
+static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RX_RINGS; i++) {
+		struct eth_rx_bd *rx_bd;
+
+		rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
+		rx_bd->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+		rx_bd->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_desc_mapping +
+				    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
+	}
+}
+
+static inline void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
+{
+	int i;
+
+	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
+		struct eth_rx_sge *sge;
+
+		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
+		sge->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+
+		sge->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
+			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
+	}
+}
+
+static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
+{
+	int i;
+	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
+		struct eth_rx_cqe_next_page *nextpg;
+
+		nextpg = (struct eth_rx_cqe_next_page *)
+			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
+		nextpg->addr_hi =
+			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+		nextpg->addr_lo =
+			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
+	}
+}
+
+/* Returns the number of actually allocated BDs */
+static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
+				      int rx_ring_size)
+{
+	struct bnx2x *bp = fp->bp;
+	u16 ring_prod, cqe_ring_prod;
+	int i;
+
+	fp->rx_comp_cons = 0;
+	cqe_ring_prod = ring_prod = 0;
+
+	/* This routine is called only during fp init, so
+	 * fp->eth_q_stats.rx_skb_alloc_failed = 0.
+	 */
+	for (i = 0; i < rx_ring_size; i++) {
+		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+			fp->eth_q_stats.rx_skb_alloc_failed++;
+			continue;
+		}
+		ring_prod = NEXT_RX_IDX(ring_prod);
+		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
+		WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
+	}
+
+	if (fp->eth_q_stats.rx_skb_alloc_failed)
+		BNX2X_ERR("was only able to allocate "
+			  "%d rx skbs on queue[%d]\n",
+			  (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
+
+	fp->rx_bd_prod = ring_prod;
+	/* Limit the CQE producer by the CQE ring size */
+	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+			       cqe_ring_prod);
+	fp->rx_pkt = fp->rx_calls = 0;
+
+	return i - fp->eth_q_stats.rx_skb_alloc_failed;
+}
+
+/* Statistics IDs are global per chip/path, while Client IDs for E1x are per
+ * port.
+ */
+static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
+{
+	if (!CHIP_IS_E1x(fp->bp))
+		return fp->cl_id;
+	else
+		return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
+}
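+
+/*
+ * Example (illustrative): on an E1x chip, a client with cl_id 3 on port 1
+ * is assigned statistics ID 3 + FP_SB_MAX_E1x, which keeps the IDs of the
+ * two ports disjoint within the global per-chip space.
+ */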
+
+static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
+					       bnx2x_obj_type obj_type)
+{
+	struct bnx2x *bp = fp->bp;
+
+	/* Configure classification DBs */
+	bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid,
+			   BP_FUNC(bp), bnx2x_sp(bp, mac_rdata),
+			   bnx2x_sp_mapping(bp, mac_rdata),
+			   BNX2X_FILTER_MAC_PENDING,
+			   &bp->sp_state, obj_type,
+			   &bp->macs_pool);
+}
+
+/**
+ * bnx2x_get_path_func_num - get number of active functions
+ *
+ * @bp:		driver handle
+ *
+ * Calculates the number of active (not hidden) functions on the
+ * current path.
+ */
+static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
+{
+	u8 func_num = 0, i;
+
+	/* 57710 has only one function per port */
+	if (CHIP_IS_E1(bp))
+		return 1;
+
+	/* Calculate a number of functions enabled on the current
+	 * PATH/PORT.
+	 */
+	if (CHIP_REV_IS_SLOW(bp)) {
+		if (IS_MF(bp))
+			func_num = 4;
+		else
+			func_num = 2;
+	} else {
+		for (i = 0; i < E1H_FUNC_MAX / 2; i++) {
+			u32 func_config =
+				MF_CFG_RD(bp,
+					  func_mf_config[BP_PORT(bp) + 2 * i].
+					  config);
+			func_num +=
+				((func_config & FUNC_MF_CFG_FUNC_HIDE) ? 0 : 1);
+		}
+	}
+
+	WARN_ON(!func_num);
+
+	return func_num;
+}
+
+static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
+{
+	/* RX_MODE controlling object */
+	bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
+
+	/* multicast configuration controlling object */
+	bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
+			     BP_FUNC(bp), BP_FUNC(bp),
+			     bnx2x_sp(bp, mcast_rdata),
+			     bnx2x_sp_mapping(bp, mcast_rdata),
+			     BNX2X_FILTER_MCAST_PENDING, &bp->sp_state,
+			     BNX2X_OBJ_TYPE_RX);
+
+	/* Setup CAM credit pools */
+	bnx2x_init_mac_credit_pool(bp, &bp->macs_pool, BP_FUNC(bp),
+				   bnx2x_get_path_func_num(bp));
+
+	/* RSS configuration object */
+	bnx2x_init_rss_config_obj(bp, &bp->rss_conf_obj, bp->fp->cl_id,
+				  bp->fp->cid, BP_FUNC(bp), BP_FUNC(bp),
+				  bnx2x_sp(bp, rss_rdata),
+				  bnx2x_sp_mapping(bp, rss_rdata),
+				  BNX2X_FILTER_RSS_CONF_PENDING, &bp->sp_state,
+				  BNX2X_OBJ_TYPE_RX);
+}
+
+static inline u8 bnx2x_fp_qzone_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return fp->cl_id + BP_PORT(fp->bp) * ETH_MAX_RX_CLIENTS_E1H;
+	else
+		return fp->cl_id;
+}
+
+static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
+{
+	struct bnx2x *bp = fp->bp;
+
+	if (!CHIP_IS_E1x(bp))
+		return USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
+	else
+		return USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
+}
+
+static inline void bnx2x_init_txdata(struct bnx2x *bp,
+	struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index,
+	__le16 *tx_cons_sb)
+{
+	txdata->cid = cid;
+	txdata->txq_index = txq_index;
+	txdata->tx_cons_sb = tx_cons_sb;
+
+	DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d",
+	   txdata->cid, txdata->txq_index);
+}
+
+#ifdef BCM_CNIC
+static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx)
+{
+	return bp->cnic_base_cl_id + cl_idx +
+		(bp->pf_num >> 1) * NON_ETH_CONTEXT_USE;
+}
+
+static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp)
+{
+	/* the 'first' id is allocated for the cnic */
+	return bp->base_fw_ndsb;
+}
+
+static inline u8 bnx2x_cnic_igu_sb_id(struct bnx2x *bp)
+{
+	return bp->igu_base_sb;
+}
+
+static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
+{
+	struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+	unsigned long q_type = 0;
+
+	bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
+						     BNX2X_FCOE_ETH_CL_ID_IDX);
+	/* The current BNX2X_FCOE_ETH_CID definition implies not more than
+	 *  16 ETH clients per function when CNIC is enabled!
+	 *
+	 *  Fix it ASAP!!!
+	 */
+	bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
+	bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
+	bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
+	bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
+
+	bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
+			  fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
+
+	DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)", fp->index);
+
+	/* qZone id equals the FW (per-path) client id */
+	bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
+	/* init shortcut */
+	bnx2x_fcoe(bp, ustorm_rx_prods_offset) =
+		bnx2x_rx_ustorm_prods_offset(fp);
+
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+	/* No multi-CoS for FCoE L2 client */
+	BUG_ON(fp->max_cos != 1);
+
+	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1,
+			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+			     bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
+			   "igu_sb %d\n",
+	   fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+	   fp->igu_sb_id);
+}
+#endif
+
+static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
+				       struct bnx2x_fp_txdata *txdata)
+{
+	int cnt = 1000;
+
+	while (bnx2x_has_tx_work_unload(txdata)) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for queue[%d]: "
+				 "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+				  txdata->txq_index, txdata->tx_pkt_prod,
+				  txdata->tx_pkt_cons);
+#ifdef BNX2X_STOP_ON_ERROR
+			bnx2x_panic();
+			return -EBUSY;
+#else
+			break;
+#endif
+		}
+		cnt--;
+		usleep_range(1000, 1000);
+	}
+
+	return 0;
+}
+
+int bnx2x_get_link_cfg_idx(struct bnx2x *bp);
+
+static inline void __storm_memset_struct(struct bnx2x *bp,
+					 u32 addr, size_t size, u32 *data)
+{
+	int i;
+	for (i = 0; i < size/4; i++)
+		REG_WR(bp, addr + (i * 4), data[i]);
+}
+
+static inline void storm_memset_func_cfg(struct bnx2x *bp,
+				struct tstorm_eth_function_common_config *tcfg,
+				u16 abs_fid)
+{
+	size_t size = sizeof(struct tstorm_eth_function_common_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
+}
+
+static inline void storm_memset_cmng(struct bnx2x *bp,
+				struct cmng_struct_per_port *cmng,
+				u8 port)
+{
+	size_t size = sizeof(struct cmng_struct_per_port);
+
+	u32 addr = BAR_XSTRORM_INTMEM +
+			XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)cmng);
+}
+
+/**
+ * bnx2x_wait_sp_comp - wait for the outstanding SP commands.
+ *
+ * @bp:		driver handle
+ * @mask:	bits that need to be cleared
+ */
+static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
+{
+	int tout = 5000; /* Wait for 5 secs tops */
+
+	while (tout--) {
+		smp_mb();
+		netif_addr_lock_bh(bp->dev);
+		if (!(bp->sp_state & mask)) {
+			netif_addr_unlock_bh(bp->dev);
+			return true;
+		}
+		netif_addr_unlock_bh(bp->dev);
+
+		usleep_range(1000, 1000);
+	}
+
+	smp_mb();
+
+	netif_addr_lock_bh(bp->dev);
+	if (bp->sp_state & mask) {
+		BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
+			  "mask 0x%lx\n", bp->sp_state, mask);
+		netif_addr_unlock_bh(bp->dev);
+		return false;
+	}
+	netif_addr_unlock_bh(bp->dev);
+
+	return true;
+}
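+
+/*
+ * Usage sketch (illustrative only): a caller that must drain all
+ * outstanding slow path commands before proceeding might do:
+ *
+ *	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+ *		BNX2X_ERR("timeout waiting for SP completions\n");
+ */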
+
+/**
+ * bnx2x_set_ctx_validation - set CDU context validation values
+ *
+ * @bp:		driver handle
+ * @cxt:	context of the connection on the host memory
+ * @cid:	SW CID of the connection to be configured
+ */
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid);
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec);
+void bnx2x_acquire_phy_lock(struct bnx2x *bp);
+void bnx2x_release_phy_lock(struct bnx2x *bp);
+
+/**
+ * bnx2x_extract_max_cfg - extract MAX BW part from MF configuration.
+ *
+ * @bp:		driver handle
+ * @mf_cfg:	MF configuration
+ *
+ */
+static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
+{
+	u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
+			      FUNC_MF_CFG_MAX_BW_SHIFT;
+	if (!max_cfg) {
+		BNX2X_ERR("Illegal configuration detected for Max BW - "
+			  "using 100 instead\n");
+		max_cfg = 100;
+	}
+	return max_cfg;
+}
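+
+/*
+ * Worked example (illustrative, assuming the MAX BW field occupies the
+ * upper 16 bits of mf_cfg): mf_cfg == 0x00640000 yields max_cfg == 100,
+ * while a zeroed field triggers the fallback to 100 above.
+ */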
+
+#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
new file mode 100644
index 0000000..a4ea35f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2507 @@
+/* bnx2x_dcb.c: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+
+/* forward declarations of dcbx related functions */
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+					  u32 *set_configuration_ets_pg,
+					  u32 *pri_pg_tbl);
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+					    u32 *pg_pri_orginal_spread,
+					    struct pg_help_data *help_data);
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       u32 *pg_pri_orginal_spread);
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+				struct cos_help_data *cos_data,
+				u32 *pg_pri_orginal_spread,
+				struct dcbx_ets_feature *ets);
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+				 struct bnx2x_func_tx_start_params*);
+
+/* helpers: read/write len bytes from addr into buff by REG_RD/REG_WR */
+static void bnx2x_read_data(struct bnx2x *bp, u32 *buff,
+				   u32 addr, u32 len)
+{
+	int i;
+	for (i = 0; i < len; i += 4, buff++)
+		*buff = REG_RD(bp, addr + i);
+}
+
+static void bnx2x_write_data(struct bnx2x *bp, u32 *buff,
+				    u32 addr, u32 len)
+{
+	int i;
+	for (i = 0; i < len; i += 4, buff++)
+		REG_WR(bp, addr + i, *buff);
+}
+
+static void bnx2x_pfc_set(struct bnx2x *bp)
+{
+	struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
+	u32 pri_bit, val = 0;
+	int i;
+
+	pfc_params.num_of_rx_cos_priority_mask =
+					bp->dcbx_port_params.ets.num_of_cos;
+
+	/* Tx COS configuration */
+	for (i = 0; i < bp->dcbx_port_params.ets.num_of_cos; i++)
+		/*
+		 * We configure only the pauseable bits (non-pauseable aren't
+		 * configured at all); this is done to avoid false pauses from
+		 * the network.
+		 */
+		pfc_params.rx_cos_priority_mask[i] =
+			bp->dcbx_port_params.ets.cos_params[i].pri_bitmask
+				& DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+	/*
+	 * Rx COS configuration
+	 * Changing the PFC RX configuration:
+	 * in RX, COS0 will always be configured as lossy and COS1 as lossless.
+	 */
+	for (i = 0; i < MAX_PFC_PRIORITIES; i++) {
+		pri_bit = 1 << i;
+
+		if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
+			val |= 1 << (i * 4);
+	}
+
+	pfc_params.pkt_priority_to_cos = val;
+
+	/* RX COS0 */
+	pfc_params.llfc_low_priority_classes = 0;
+	/* RX COS1 */
+	pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
+
+	/* BRB configuration */
+	pfc_params.cos0_pauseable = false;
+	pfc_params.cos1_pauseable = true;
+
+	bnx2x_acquire_phy_lock(bp);
+	bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
+	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
+	bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_pfc_clear(struct bnx2x *bp)
+{
+	struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
+	nig_params.pause_enable = 1;
+#ifdef BNX2X_SAFC
+	if (bp->flags & SAFC_TX_FLAG) {
+		u32 high = 0, low = 0;
+		int i;
+
+		for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
+			if (bp->pri_map[i] == 1)
+				high |= (1 << i);
+			if (bp->pri_map[i] == 0)
+				low |= (1 << i);
+		}
+
+		nig_params.llfc_high_priority_classes = high;
+		nig_params.llfc_low_priority_classes = low;
+
+		nig_params.pause_enable = 0;
+		nig_params.llfc_enable = 1;
+		nig_params.llfc_out_en = 1;
+	}
+#endif /* BNX2X_SAFC */
+	bnx2x_acquire_phy_lock(bp);
+	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
+	bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
+	bnx2x_release_phy_lock(bp);
+}
+
+static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
+				       struct dcbx_features *features,
+				       u32 error)
+{
+	u8 i = 0;
+	DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
+
+	/* PG */
+	DP(NETIF_MSG_LINK,
+	   "local_mib.features.ets.enabled %x\n", features->ets.enabled);
+	for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
+		DP(NETIF_MSG_LINK,
+		   "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
+		   DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
+	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
+		DP(NETIF_MSG_LINK,
+		   "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
+		   DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
+
+	/* pfc */
+	DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
+					features->pfc.pri_en_bitmap);
+	DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
+					features->pfc.pfc_caps);
+	DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
+					features->pfc.enabled);
+
+	DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
+					features->app.default_pri);
+	DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
+					features->app.tc_supported);
+	DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
+					features->app.enabled);
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+		DP(NETIF_MSG_LINK,
+		   "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
+		   i, features->app.app_pri_tbl[i].app_id);
+		DP(NETIF_MSG_LINK,
+		   "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
+		   i, features->app.app_pri_tbl[i].pri_bitmap);
+		DP(NETIF_MSG_LINK,
+		   "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
+		   i, features->app.app_pri_tbl[i].appBitfield);
+	}
+}
+
+static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
+				       u8 pri_bitmap,
+				       u8 llfc_traf_type)
+{
+	u32 pri = MAX_PFC_PRIORITIES;
+	u32 index = MAX_PFC_PRIORITIES - 1;
+	u32 pri_mask;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+	/* Choose the highest priority */
+	while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
+		pri_mask = 1 << index;
+		if (GET_FLAGS(pri_bitmap, pri_mask))
+			pri = index;
+		index--;
+	}
+
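+	/* Keep the highest priority seen across all matching APP entries */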
+	if (pri < MAX_PFC_PRIORITIES)
+		ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
+}
+
+static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
+				   struct dcbx_app_priority_feature *app,
+				   u32 error)
+{
+	u8 index;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+	if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
+
+	if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
+
+	if (app->enabled &&
+	    !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
+
+		bp->dcbx_port_params.app.enabled = true;
+
+		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+			ttp[index] = 0;
+
+		if (app->default_pri < MAX_PFC_PRIORITIES)
+			ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
+
+		for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
+			struct dcbx_app_priority_entry *entry =
+							app->app_pri_tbl;
+
+			if (GET_FLAGS(entry[index].appBitfield,
+				     DCBX_APP_SF_ETH_TYPE) &&
+			   ETH_TYPE_FCOE == entry[index].app_id)
+				bnx2x_dcbx_get_ap_priority(bp,
+						entry[index].pri_bitmap,
+						LLFC_TRAFFIC_TYPE_FCOE);
+
+			if (GET_FLAGS(entry[index].appBitfield,
+				     DCBX_APP_SF_PORT) &&
+			   TCP_PORT_ISCSI == entry[index].app_id)
+				bnx2x_dcbx_get_ap_priority(bp,
+						entry[index].pri_bitmap,
+						LLFC_TRAFFIC_TYPE_ISCSI);
+		}
+	} else {
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
+		bp->dcbx_port_params.app.enabled = false;
+		for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
+			ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
+	}
+}
+
+static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
+				       struct dcbx_ets_feature *ets,
+				       u32 error)
+{
+	int i = 0;
+	u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
+	struct pg_help_data pg_help_data;
+	struct bnx2x_dcbx_cos_params *cos_params =
+			bp->dcbx_port_params.ets.cos_params;
+
+	memset(&pg_help_data, 0, sizeof(struct pg_help_data));
+
+	if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
+
+	/* Clean up old settings of ets on COS */
+	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
+		cos_params[i].pauseable = false;
+		cos_params[i].strict = BNX2X_DCBX_STRICT_INVALID;
+		cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
+		cos_params[i].pri_bitmask = 0;
+	}
+
+	if (bp->dcbx_port_params.app.enabled &&
+	   !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
+	   ets->enabled) {
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
+		bp->dcbx_port_params.ets.enabled = true;
+
+		bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
+					      pg_pri_orginal_spread,
+					      ets->pri_pg_tbl);
+
+		bnx2x_dcbx_get_num_pg_traf_type(bp,
+						pg_pri_orginal_spread,
+						&pg_help_data);
+
+		bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
+					   ets, pg_pri_orginal_spread);
+
+	} else {
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
+		bp->dcbx_port_params.ets.enabled = false;
+		ets->pri_pg_tbl[0] = 0;
+
+		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
+			DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
+	}
+}
+
+static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
+					struct dcbx_pfc_feature *pfc, u32 error)
+{
+	if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
+
+	if (bp->dcbx_port_params.app.enabled &&
+	   !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
+	   pfc->enabled) {
+		bp->dcbx_port_params.pfc.enabled = true;
+		bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
+			~(pfc->pri_en_bitmap);
+	} else {
+		DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
+		bp->dcbx_port_params.pfc.enabled = false;
+		bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
+	}
+}
+
+/* maps unmapped priorities to the same COS as L2 */
+static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
+{
+	int i;
+	u32 unmapped = (1 << MAX_PFC_PRIORITIES) - 1; /* all ones */
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	u32 nw_prio = 1 << ttp[LLFC_TRAFFIC_TYPE_NW];
+	struct bnx2x_dcbx_cos_params *cos_params =
+			bp->dcbx_port_params.ets.cos_params;
+
+	/* get unmapped priorities by clearing mapped bits */
+	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+		unmapped &= ~(1 << ttp[i]);
+
+	/* find cos for nw prio and extend it with unmapped */
+	for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
+		if (cos_params[i].pri_bitmask & nw_prio) {
+			/* extend the bitmask with unmapped */
+			DP(NETIF_MSG_LINK,
+			   "cos %d extended with 0x%08x", i, unmapped);
+			cos_params[i].pri_bitmask |= unmapped;
+			break;
+		}
+	}
+}
+
+static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
+				     struct dcbx_features *features,
+				     u32 error)
+{
+	bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
+
+	bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
+
+	bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
+
+	bnx2x_dcbx_map_nw(bp);
+}
+
+#define DCBX_LOCAL_MIB_MAX_TRY_READ		(100)
+static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
+			       u32 *base_mib_addr,
+			       u32 offset,
+			       int read_mib_type)
+{
+	int max_try_read = 0;
+	u32 mib_size, prefix_seq_num, suffix_seq_num;
+	struct lldp_remote_mib *remote_mib;
+	struct lldp_local_mib  *local_mib;
+
+	switch (read_mib_type) {
+	case DCBX_READ_LOCAL_MIB:
+		mib_size = sizeof(struct lldp_local_mib);
+		break;
+	case DCBX_READ_REMOTE_MIB:
+		mib_size = sizeof(struct lldp_remote_mib);
+		break;
+	default:
+		return 1; /*error*/
+	}
+
+	offset += BP_PORT(bp) * mib_size;
+
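+	/* The FW brackets each MIB with matching prefix/suffix sequence
+	 * numbers; re-read until they agree so that we use a consistent
+	 * snapshot rather than a half-updated copy.
+	 */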
+	do {
+		bnx2x_read_data(bp, base_mib_addr, offset, mib_size);
+
+		max_try_read++;
+
+		switch (read_mib_type) {
+		case DCBX_READ_LOCAL_MIB:
+			local_mib = (struct lldp_local_mib *) base_mib_addr;
+			prefix_seq_num = local_mib->prefix_seq_num;
+			suffix_seq_num = local_mib->suffix_seq_num;
+			break;
+		case DCBX_READ_REMOTE_MIB:
+			remote_mib = (struct lldp_remote_mib *) base_mib_addr;
+			prefix_seq_num = remote_mib->prefix_seq_num;
+			suffix_seq_num = remote_mib->suffix_seq_num;
+			break;
+		default:
+			return 1; /*error*/
+		}
+	} while ((prefix_seq_num != suffix_seq_num) &&
+	       (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
+
+	if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
+		BNX2X_ERR("MIB could not be read\n");
+		return 1;
+	}
+
+		BNX2X_ERR("Invalid required_num_of_pg or num_of_pg\n");
+
+static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
+{
+	if (bp->dcbx_port_params.pfc.enabled &&
+	    !(bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+		/*
+		 * 1. Fill up common PFC structures if required
+		 * 2. Configure NIG, MAC and BRB via the elink
+		 */
+		bnx2x_pfc_set(bp);
+	else
+		bnx2x_pfc_clear(bp);
+}
+
+static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_TX_STOP;
+
+	DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	struct bnx2x_func_tx_start_params *tx_params =
+		&func_params.params.tx_start;
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_TX_START;
+
+	bnx2x_dcbx_fw_struct(bp, tx_params);
+
+	DP(NETIF_MSG_LINK, "START TRAFFIC\n");
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+	int rc = 0;
+
+	if (ets->num_of_cos == 0 || ets->num_of_cos > DCBX_COS_MAX_NUM_E2) {
+		BNX2X_ERR("Illegal number of COSes %d\n", ets->num_of_cos);
+		return;
+	}
+
+	/* valid COS entries */
+	if (ets->num_of_cos == 1)   /* no ETS */
+		return;
+
+	/* sanity */
+	if (((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[0].strict) &&
+	     (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
+	    ((BNX2X_DCBX_STRICT_INVALID == ets->cos_params[1].strict) &&
+	     (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
+		BNX2X_ERR("all COS should have at least bw_limit or strict "
+			  "ets->cos_params[0].strict= %x "
+			  "ets->cos_params[0].bw_tbl= %x "
+			  "ets->cos_params[1].strict= %x "
+			  "ets->cos_params[1].bw_tbl= %x\n",
+			  ets->cos_params[0].strict,
+			  ets->cos_params[0].bw_tbl,
+			  ets->cos_params[1].strict,
+			  ets->cos_params[1].bw_tbl);
+		return;
+	}
+	/* If both COSes carry a valid bw_tbl then BW arbitration rules */
+	if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
+	    (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
+		u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
+		u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
+		/* Do not allow 0-100 configuration
+		 * since PBF does not support it
+		 * force 1-99 instead
+		 */
+		if (bw_tbl_0 == 0) {
+			bw_tbl_0 = 1;
+			bw_tbl_1 = 99;
+		} else if (bw_tbl_1 == 0) {
+			bw_tbl_1 = 1;
+			bw_tbl_0 = 99;
+		}
+
+		bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
+	} else {
+		if (ets->cos_params[0].strict == BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 0);
+		else if (ets->cos_params[1].strict
+					== BNX2X_DCBX_STRICT_COS_HIGHEST)
+			rc = bnx2x_ets_strict(&bp->link_params, 1);
+		if (rc)
+			BNX2X_ERR("update_ets_params failed\n");
+	}
+}
+
+/*
+ * In E3B0 the configuration may have more than 2 COS.
+ */
+void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+{
+	struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
+	struct bnx2x_ets_params ets_params = { 0 };
+	u8 i;
+
+	ets_params.num_of_cos = ets->num_of_cos;
+
+	for (i = 0; i < ets->num_of_cos; i++) {
+		/* COS is SP */
+		if (ets->cos_params[i].strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (ets->cos_params[i].bw_tbl != DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be not BW and not SP\n");
+				return;
+			}
+
+			ets_params.cos[i].state = bnx2x_cos_state_strict;
+			ets_params.cos[i].params.sp_params.pri =
+						ets->cos_params[i].strict;
+		} else { /* COS is BW */
+			if (ets->cos_params[i].bw_tbl == DCBX_INVALID_COS_BW) {
+				BNX2X_ERR("COS can't be not BW and not SP\n");
+				return;
+			}
+			ets_params.cos[i].state = bnx2x_cos_state_bw;
+			ets_params.cos[i].params.bw_params.bw =
+						(u8)ets->cos_params[i].bw_tbl;
+		}
+	}
+
+	/* Configure the ETS in HW */
+	if (bnx2x_ets_e3b0_config(&bp->link_params, &bp->link_vars,
+				  &ets_params)) {
+		BNX2X_ERR("bnx2x_ets_e3b0_config failed\n");
+		bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+	}
+}
+
+static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
+{
+	bnx2x_ets_disabled(&bp->link_params, &bp->link_vars);
+
+	if (!bp->dcbx_port_params.ets.enabled ||
+	    (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR))
+		return;
+
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_update_ets_config(bp);
+	else
+		bnx2x_dcbx_2cos_limit_update_ets_config(bp);
+}
+
+#ifdef BCM_DCBNL
+static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
+{
+	struct lldp_remote_mib remote_mib = {0};
+	u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
+	int rc;
+
+	DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
+	   dcbx_remote_mib_offset);
+
+	if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
+		BNX2X_ERR("FW doesn't support dcbx_remote_mib_offset\n");
+		return -EINVAL;
+	}
+
+	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&remote_mib, dcbx_remote_mib_offset,
+				 DCBX_READ_REMOTE_MIB);
+
+	if (rc) {
+		BNX2X_ERR("Failed to read remote mib from FW\n");
+		return rc;
+	}
+
+	/* save features and flags */
+	bp->dcbx_remote_feat = remote_mib.features;
+	bp->dcbx_remote_flags = remote_mib.flags;
+	return 0;
+}
+#endif
+
+static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
+{
+	struct lldp_local_mib local_mib = {0};
+	u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
+	int rc;
+
+	DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+
+	if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
+		BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
+		return -EINVAL;
+	}
+
+	rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
+				 DCBX_READ_LOCAL_MIB);
+
+	if (rc) {
+		BNX2X_ERR("Failed to read local mib from FW\n");
+		return rc;
+	}
+
+	/* save features and error */
+	bp->dcbx_local_feat = local_mib.features;
+	bp->dcbx_error = local_mib.error;
+	return 0;
+}
+
+#ifdef BCM_DCBNL
+static inline
+u8 bnx2x_dcbx_dcbnl_app_up(struct dcbx_app_priority_entry *ent)
+{
+	u8 pri;
+
+	/* Choose the highest priority */
+	for (pri = MAX_PFC_PRIORITIES - 1; pri > 0; pri--)
+		if (ent->pri_bitmap & (1 << pri))
+			break;
+	return pri;
+}
+
+static inline
+u8 bnx2x_dcbx_dcbnl_app_idtype(struct dcbx_app_priority_entry *ent)
+{
+	return ((ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) ==
+		DCBX_APP_SF_PORT) ? DCB_APP_IDTYPE_PORTNUM :
+		DCB_APP_IDTYPE_ETHTYPE;
+}
+
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
+{
+	int i, err = 0;
+
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL && err == 0; i++) {
+		struct dcbx_app_priority_entry *ent =
+			&bp->dcbx_local_feat.app.app_pri_tbl[i];
+
+		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+			u8 up = bnx2x_dcbx_dcbnl_app_up(ent);
+
+			/* avoid invalid user-priority */
+			if (up) {
+				struct dcb_app app;
+				app.selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+				app.protocol = ent->app_id;
+				app.priority = delall ? 0 : up;
+				err = dcb_setapp(bp->dev, &app);
+			}
+		}
+	}
+	return err;
+}
+#endif
+
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
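+	/* drv_flags lives in shmem2 and may be updated concurrently, so
+	 * the read-modify-write is done under the dedicated HW lock.
+	 */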
+	if (SHMEM2_HAS(bp, drv_flags)) {
+		u32 drv_flags;
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+		drv_flags = SHMEM2_RD(bp, drv_flags);
+
+		if (set)
+			SET_FLAGS(drv_flags, flags);
+		else
+			RESET_FLAGS(drv_flags, flags);
+
+		SHMEM2_WR(bp, drv_flags, drv_flags);
+		DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+		bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+	}
+}
+
+static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
+{
+	u8 prio, cos;
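+	/* Derive each priority's transmit COS from the negotiated
+	 * COS pri_bitmask values.
+	 */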
+	for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++) {
+		for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
+			if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
+			    & (1 << prio)) {
+				bp->prio_to_cos[prio] = cos;
+				DP(NETIF_MSG_LINK,
+				   "tx_mapping %d --> %d\n", prio, cos);
+			}
+		}
+	}
+
+	/* setup tc must be called under rtnl lock, but we can't take it here
+	 * as we are handling an attention on a work queue which must be
+	 * flushed in some rtnl-locked contexts (e.g. if down)
+	 */
+	if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
+{
+	switch (state) {
+	case BNX2X_DCBX_STATE_NEG_RECEIVED:
+		{
+			DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+#ifdef BCM_DCBNL
+			/*
+			 * Delete app tlvs from dcbnl before reading new
+			 * negotiation results
+			 */
+			bnx2x_dcbnl_update_applist(bp, true);
+
+			/* Read remote mib if dcbx is in the FW */
+			if (bnx2x_dcbx_read_shmem_remote_mib(bp))
+				return;
+#endif
+			/* Read neg results if dcbx is in the FW */
+			if (bnx2x_dcbx_read_shmem_neg_results(bp))
+				return;
+
+			bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+						  bp->dcbx_error);
+
+			bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+						 bp->dcbx_error);
+
+			/* mark DCBX result for PMF migration */
+			bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
+#ifdef BCM_DCBNL
+			/*
+			 * Add new app tlvs to dcbnl
+			 */
+			bnx2x_dcbnl_update_applist(bp, false);
+#endif
+			bnx2x_dcbx_stop_hw_tx(bp);
+
+			/* reconfigure the netdevice with the results of the new
+			 * dcbx negotiation.
+			 */
+			bnx2x_dcbx_update_tc_mapping(bp);
+
+			return;
+		}
+	case BNX2X_DCBX_STATE_TX_PAUSED:
+		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+		bnx2x_pfc_set_pfc(bp);
+
+		bnx2x_dcbx_update_ets_params(bp);
+		bnx2x_dcbx_resume_hw_tx(bp);
+		return;
+	case BNX2X_DCBX_STATE_TX_RELEASED:
+		DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
+#ifdef BCM_DCBNL
+		/*
+		 * Send a notification for the new negotiated parameters
+		 */
+		dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+		return;
+	default:
+		BNX2X_ERR("Unknown DCBX_STATE\n");
+	}
+}
+
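+/* Admin MIBs follow the per-port lldp_params array in shmem; skip over
+ * that array and index by port into the lldp_admin_mib array.
+ */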
+#define LLDP_ADMIN_MIB_OFFSET(bp)	(PORT_MAX*sizeof(struct lldp_params) + \
+				      BP_PORT(bp)*sizeof(struct lldp_admin_mib))
+
+static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
+				u32 dcbx_lldp_params_offset)
+{
+	struct lldp_admin_mib admin_mib;
+	u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
+	u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
+
+	/* shortcuts */
+	struct dcbx_features *af = &admin_mib.features;
+	struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
+
+	memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
+
+	/* Read the data first */
+	bnx2x_read_data(bp, (u32 *)&admin_mib, offset,
+			sizeof(struct lldp_admin_mib));
+
+	if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
+		SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+	else
+		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
+
+	if (dp->overwrite_settings == BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE) {
+
+		RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
+		admin_mib.ver_cfg_flags |=
+			(dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
+			 DCBX_CEE_VERSION_MASK;
+
+		af->ets.enabled = (u8)dp->admin_ets_enable;
+
+		af->pfc.enabled = (u8)dp->admin_pfc_enable;
+
+		/* FOR IEEE dp->admin_tc_supported_tx_enable */
+		if (dp->admin_ets_configuration_tx_enable)
+			SET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_ETS_CONFIG_TX_ENABLED);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags,
+				    DCBX_ETS_CONFIG_TX_ENABLED);
+		/* For IEEE admin_ets_recommendation_tx_enable */
+		if (dp->admin_pfc_tx_enable)
+			SET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_PFC_CONFIG_TX_ENABLED);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_PFC_CONFIG_TX_ENABLED);
+
+		if (dp->admin_application_priority_tx_enable)
+			SET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_APP_CONFIG_TX_ENABLED);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags,
+				  DCBX_APP_CONFIG_TX_ENABLED);
+
+		if (dp->admin_ets_willing)
+			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
+		/* For IEEE admin_ets_reco_valid */
+		if (dp->admin_pfc_willing)
+			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
+
+		if (dp->admin_app_priority_willing)
+			SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+		else
+			RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
+
+		for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
+			DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
+				(u8)dp->admin_configuration_bw_precentage[i]);
+
+			DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
+			   i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
+		}
+
+		for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+			DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
+					(u8)dp->admin_configuration_ets_pg[i]);
+
+			DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
+			   i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
+		}
+
+		/*For IEEE admin_recommendation_bw_precentage
+		 *For IEEE admin_recommendation_ets_pg */
+		af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
+		for (i = 0; i < 4; i++) {
+			if (dp->admin_priority_app_table[i].valid) {
+				struct bnx2x_admin_priority_app_table *table =
+					dp->admin_priority_app_table;
+				if ((ETH_TYPE_FCOE == table[i].app_id) &&
+				   (TRAFFIC_TYPE_ETH == table[i].traffic_type))
+					traf_type = FCOE_APP_IDX;
+				else if ((TCP_PORT_ISCSI == table[i].app_id) &&
+				   (TRAFFIC_TYPE_PORT == table[i].traffic_type))
+					traf_type = ISCSI_APP_IDX;
+				else
+					traf_type = other_traf_type++;
+
+				af->app.app_pri_tbl[traf_type].app_id =
+					table[i].app_id;
+
+				af->app.app_pri_tbl[traf_type].pri_bitmap =
+					(u8)(1 << table[i].priority);
+
+				af->app.app_pri_tbl[traf_type].appBitfield =
+				    (DCBX_APP_ENTRY_VALID);
+
+				af->app.app_pri_tbl[traf_type].appBitfield |=
+				   (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
+					DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
+			}
+		}
+
+		af->app.default_pri = (u8)dp->admin_default_priority;
+
+	}
+
+	/* Write the data. */
+	bnx2x_write_data(bp, (u32 *)&admin_mib, offset,
+			 sizeof(struct lldp_admin_mib));
+
+}
+
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
+{
+	if (!CHIP_IS_E1x(bp)) {
+		bp->dcb_state = dcb_on;
+		bp->dcbx_enabled = dcbx_enabled;
+	} else {
+		bp->dcb_state = false;
+		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
+	}
+	DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
+	   dcb_on ? "ON" : "OFF",
+	   dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
+	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
+	   dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
+	   "on-chip with negotiation" : "invalid");
+}
+
+void bnx2x_dcbx_init_params(struct bnx2x *bp)
+{
+	bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
+	bp->dcbx_config_params.admin_ets_willing = 1;
+	bp->dcbx_config_params.admin_pfc_willing = 1;
+	bp->dcbx_config_params.overwrite_settings = 1;
+	bp->dcbx_config_params.admin_ets_enable = 1;
+	bp->dcbx_config_params.admin_pfc_enable = 1;
+	bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
+	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+	bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+	bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+	bp->dcbx_config_params.admin_ets_reco_valid = 1;
+	bp->dcbx_config_params.admin_app_priority_willing = 1;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
+	bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
+	bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
+	bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
+	bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
+	bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
+	bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
+	bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
+	bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
+	bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
+	bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
+	bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
+	bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
+	bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
+	bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
+	bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
+	bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
+	bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
+	bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
+	bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
+	bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
+	bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
+	bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
+	bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
+	bp->dcbx_config_params.admin_default_priority =
+		bp->dcbx_config_params.admin_priority_app_table[1].priority;
+}
+
+void bnx2x_dcbx_init(struct bnx2x *bp)
+{
+	u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
+
+	if (bp->dcbx_enabled <= 0)
+		return;
+
+	/* validate:
+	 * chip is good for dcbx version,
+	 * dcb is wanted
+	 * the function is pmf
+	 * shmem2 contains DCBX support fields
+	 */
+	DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
+	   bp->dcb_state, bp->port.pmf);
+
+	if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
+	    SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
+		dcbx_lldp_params_offset =
+			SHMEM2_RD(bp, dcbx_lldp_params_offset);
+
+		DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
+		   dcbx_lldp_params_offset);
+
+		bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+
+		if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
+			bnx2x_dcbx_admin_mib_updated_params(bp,
+				dcbx_lldp_params_offset);
+
+			/* Let HW start negotiation */
+			bnx2x_fw_command(bp,
+					 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
+		}
+	}
+}
+
+static void
+bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
+			    struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+	u8 pri = 0;
+	u8 cos = 0;
+
+	DP(NETIF_MSG_LINK,
+	   "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
+	DP(NETIF_MSG_LINK,
+	   "pdev->params.dcbx_port_params.pfc."
+	   "priority_non_pauseable_mask %x\n",
+	   bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
+
+	for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
+		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+		   "cos_params[%d].pri_bitmask %x\n", cos,
+		   bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+
+		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+		   "cos_params[%d].bw_tbl %x\n", cos,
+		   bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+
+		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+		   "cos_params[%d].strict %x\n", cos,
+		   bp->dcbx_port_params.ets.cos_params[cos].strict);
+
+		DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
+		   "cos_params[%d].pauseable %x\n", cos,
+		   bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+	}
+
+	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+		DP(NETIF_MSG_LINK,
+		   "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
+		   "priority %x\n", pri,
+		   pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+
+		DP(NETIF_MSG_LINK,
+		   "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
+		   pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
+	}
+}
+
+/* fills help_data according to pg_info */
+static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
+					    u32 *pg_pri_orginal_spread,
+					    struct pg_help_data *help_data)
+{
+	bool pg_found  = false;
+	u32 i, traf_type, add_traf_type, add_pg;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	struct pg_entry_help_data *data = help_data->data; /* shortcut */
+
+	/* Set to invalid */
+	for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
+		data[i].pg = DCBX_ILLEGAL_PG;
+
+	for (add_traf_type = 0;
+	     add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
+		pg_found = false;
+		if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
+			add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
+			for (traf_type = 0;
+			     traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+			     traf_type++) {
+				if (data[traf_type].pg == add_pg) {
+					if (!(data[traf_type].pg_priority &
+					     (1 << ttp[add_traf_type])))
+						data[traf_type].
+							num_of_dif_pri++;
+					data[traf_type].pg_priority |=
+						(1 << ttp[add_traf_type]);
+					pg_found = true;
+					break;
+				}
+			}
+			if (false == pg_found) {
+				data[help_data->num_of_pg].pg = add_pg;
+				data[help_data->num_of_pg].pg_priority =
+						(1 << ttp[add_traf_type]);
+				data[help_data->num_of_pg].num_of_dif_pri = 1;
+				help_data->num_of_pg++;
+			}
+		}
+		DP(NETIF_MSG_LINK,
+		   "add_traf_type %d pg_found %s num_of_pg %d\n",
+		   add_traf_type, (false == pg_found) ? "NO" : "YES",
+		   help_data->num_of_pg);
+	}
+}
+
+static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
+					       struct cos_help_data *cos_data,
+					       u32 pri_join_mask)
+{
+	/* Only one priority, hence only one COS */
+	cos_data->data[0].pausable =
+		IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+	cos_data->data[0].pri_join_mask = pri_join_mask;
+	cos_data->data[0].cos_bw = 100;
+	cos_data->num_of_cos = 1;
+}
+
+static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
+					    struct cos_entry_help_data *data,
+					    u8 pg_bw)
+{
+	if (data->cos_bw == DCBX_INVALID_COS_BW)
+		data->cos_bw = pg_bw;
+	else
+		data->cos_bw += pg_bw;
+}
+
+static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
+			struct cos_help_data *cos_data,
+			u32 *pg_pri_orginal_spread,
+			struct dcbx_ets_feature *ets)
+{
+	u32	pri_tested	= 0;
+	u8	i		= 0;
+	u8	entry		= 0;
+	u8	pg_entry	= 0;
+	u8	num_of_pri	= LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+	cos_data->data[0].pausable = true;
+	cos_data->data[1].pausable = false;
+	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+
+	for (i = 0 ; i < num_of_pri ; i++) {
+		pri_tested = 1 << bp->dcbx_port_params.
+					app.traffic_type_priority[i];
+
+		if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
+			cos_data->data[1].pri_join_mask |= pri_tested;
+			entry = 1;
+		} else {
+			cos_data->data[0].pri_join_mask |= pri_tested;
+			entry = 0;
+		}
+		pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
+						app.traffic_type_priority[i]];
+		/* There can be only one strict pg */
+		if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
+			bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
+				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
+		else
+			/* If we join a group and one member is strict
+			 * then strict priority rules */
+			cos_data->data[entry].strict =
+						BNX2X_DCBX_STRICT_COS_HIGHEST;
+	}
+	if ((0 == cos_data->data[0].pri_join_mask) &&
+	    (0 == cos_data->data[1].pri_join_mask))
+		BNX2X_ERR("dcbx error: Both groups must have priorities\n");
+}
+
+
+#ifndef POWER_OF_2
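+/* x & (x - 1) clears the lowest set bit, so the expression is true
+ * exactly when x has a single bit set (i.e. x is a power of two).
+ */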
+#define POWER_OF_2(x)	((0 != (x)) && (0 == ((x) & ((x) - 1))))
+#endif
+
+static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
+					      struct pg_help_data *pg_help_data,
+					      struct cos_help_data *cos_data,
+					      u32 pri_join_mask,
+					      u8 num_of_dif_pri)
+{
+	u8 i = 0;
+	u32 pri_tested = 0;
+	u32 pri_mask_without_pri = 0;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+	if (num_of_dif_pri == 1) {
+		bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
+		return;
+	}
+	/* single priority group */
+	if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+		/* If there are both pauseable and non-pauseable priorities,
+		 * the pauseable priorities go to the first queue and
+		 * the non-pauseable priorities go to the second queue.
+		 */
+		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+			/* Pauseable */
+			cos_data->data[0].pausable = true;
+			/* Non pauseable.*/
+			cos_data->data[1].pausable = false;
+
+			if (2 == num_of_dif_pri) {
+				cos_data->data[0].cos_bw = 50;
+				cos_data->data[1].cos_bw = 50;
+			}
+
+			if (3 == num_of_dif_pri) {
+				if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
+							pri_join_mask))) {
+					cos_data->data[0].cos_bw = 33;
+					cos_data->data[1].cos_bw = 67;
+				} else {
+					cos_data->data[0].cos_bw = 67;
+					cos_data->data[1].cos_bw = 33;
+				}
+			}
+
+		} else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
+			/* If there are only pauseable priorities,
+			 * then one/two priorities go to the first queue
+			 * and one priority goes to the second queue.
+			 */
+			if (2 == num_of_dif_pri) {
+				cos_data->data[0].cos_bw = 50;
+				cos_data->data[1].cos_bw = 50;
+			} else {
+				cos_data->data[0].cos_bw = 67;
+				cos_data->data[1].cos_bw = 33;
+			}
+			cos_data->data[1].pausable = true;
+			cos_data->data[0].pausable = true;
+			/* All priorities except FCOE */
+			cos_data->data[0].pri_join_mask = (pri_join_mask &
+				((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
+			/* Only FCOE priority.*/
+			cos_data->data[1].pri_join_mask =
+				(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
+		} else
+			/* If there are only non-pauseable priorities,
+			 * they will all go to the same queue.
+			 */
+			bnx2x_dcbx_ets_disabled_entry_data(bp,
+						cos_data, pri_join_mask);
+	} else {
+		/* priority group which is not BW limited (PG#15):*/
+		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+			/* If there are both pauseable and non-pauseable
+			 * priorities, the pauseable priorities go to the first
+			 * queue and the non-pauseable priorities
+			 * go to the second queue.
+			 */
+			if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
+			    DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
+				cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+				cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+			} else {
+				cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+				cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+			}
+			/* Pauseable */
+			cos_data->data[0].pausable = true;
+			/* Non pause-able.*/
+			cos_data->data[1].pausable = false;
+		} else {
+			/* If there are only pauseable priorities or
+			 * only non-pauseable, the lower priorities go
+			 * to the first queue and the higher priorities go
+			 * to the second queue.
+			 */
+			cos_data->data[0].pausable =
+				cos_data->data[1].pausable =
+				IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+			for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
+				pri_tested = 1 << bp->dcbx_port_params.
+					app.traffic_type_priority[i];
+				/* Remove priority tested */
+				pri_mask_without_pri =
+					(pri_join_mask & ((u8)(~pri_tested)));
+				if (pri_mask_without_pri < pri_tested)
+					break;
+			}
+
+			if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
+				BNX2X_ERR("Invalid value for pri_join_mask -"
+					  " could not find a priority\n");
+
+			cos_data->data[0].pri_join_mask = pri_mask_without_pri;
+			cos_data->data[1].pri_join_mask = pri_tested;
+			/* Both queues are strict priority,
+			 * and that with the highest priority
+			 * gets the highest strict priority in the arbiter.
+			 */
+			cos_data->data[0].strict =
+					BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(
+						BNX2X_DCBX_STRICT_COS_HIGHEST);
+			cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+		}
+	}
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+			    struct bnx2x		*bp,
+			    struct  pg_help_data	*pg_help_data,
+			    struct dcbx_ets_feature	*ets,
+			    struct cos_help_data	*cos_data,
+			    u32			*pg_pri_orginal_spread,
+			    u32				pri_join_mask,
+			    u8				num_of_dif_pri)
+{
+	u8 i = 0;
+	u8 pg[DCBX_COS_MAX_NUM_E2] = { 0 };
+
+	/* If there are both pauseable and non-pauseable priorities,
+	 * the pauseable priorities go to the first queue and
+	 * the non-pauseable priorities go to the second queue.
+	 */
+	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
+		if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+					 pg_help_data->data[0].pg_priority) ||
+		    IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
+					 pg_help_data->data[1].pg_priority)) {
+			/* If one PG contains both pauseable and
+			 * non-pauseable priorities then ETS is disabled.
+			 */
+			bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
+					pg_pri_orginal_spread, ets);
+			bp->dcbx_port_params.ets.enabled = false;
+			return;
+		}
+
+		/* Pauseable */
+		cos_data->data[0].pausable = true;
+		/* Non pauseable. */
+		cos_data->data[1].pausable = false;
+		if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
+				pg_help_data->data[0].pg_priority)) {
+			/* 0 is pauseable */
+			cos_data->data[0].pri_join_mask =
+				pg_help_data->data[0].pg_priority;
+			pg[0] = pg_help_data->data[0].pg;
+			cos_data->data[1].pri_join_mask =
+				pg_help_data->data[1].pg_priority;
+			pg[1] = pg_help_data->data[1].pg;
+		} else {/* 1 is pauseable */
+			cos_data->data[0].pri_join_mask =
+				pg_help_data->data[1].pg_priority;
+			pg[0] = pg_help_data->data[1].pg;
+			cos_data->data[1].pri_join_mask =
+				pg_help_data->data[0].pg_priority;
+			pg[1] = pg_help_data->data[0].pg;
+		}
+	} else {
+		/* If there are only pauseable priorities or
+		 * only non-pauseable, each PG goes to a queue.
+		 */
+		cos_data->data[0].pausable = cos_data->data[1].pausable =
+			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+		cos_data->data[0].pri_join_mask =
+			pg_help_data->data[0].pg_priority;
+		pg[0] = pg_help_data->data[0].pg;
+		cos_data->data[1].pri_join_mask =
+			pg_help_data->data[1].pg_priority;
+		pg[1] = pg_help_data->data[1].pg;
+	}
+
+	/* There can be only one strict pg */
+	for (i = 0 ; i < ARRAY_SIZE(pg); i++) {
+		if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
+			cos_data->data[i].cos_bw =
+				DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
+		else
+			cos_data->data[i].strict =
+						BNX2X_DCBX_STRICT_COS_HIGHEST;
+	}
+}
+
+static int bnx2x_dcbx_join_pgs(
+			      struct bnx2x            *bp,
+			      struct dcbx_ets_feature *ets,
+			      struct pg_help_data     *pg_help_data,
+			      u8                      required_num_of_pg)
+{
+	u8 entry_joined    = pg_help_data->num_of_pg - 1;
+	u8 entry_removed   = entry_joined + 1;
+	u8 pg_joined       = 0;
+
+	if (required_num_of_pg == 0 || ARRAY_SIZE(pg_help_data->data)
+						<= pg_help_data->num_of_pg) {
+
+		BNX2X_ERR("required_num_of_pg can't be zero\n");
+		return -EINVAL;
+	}
+
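+	/* Merge the last two PG entries until only required_num_of_pg
+	 * remain; a merge involving a strict-priority PG stays strict,
+	 * otherwise the bandwidths of the two PGs are summed.
+	 */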
+	while (required_num_of_pg < pg_help_data->num_of_pg) {
+		entry_joined = pg_help_data->num_of_pg - 2;
+		entry_removed = entry_joined + 1;
+		/* protect index */
+		entry_removed %= ARRAY_SIZE(pg_help_data->data);
+
+		pg_help_data->data[entry_joined].pg_priority |=
+			pg_help_data->data[entry_removed].pg_priority;
+
+		pg_help_data->data[entry_joined].num_of_dif_pri +=
+			pg_help_data->data[entry_removed].num_of_dif_pri;
+
+		if (pg_help_data->data[entry_joined].pg == DCBX_STRICT_PRI_PG ||
+		    pg_help_data->data[entry_removed].pg == DCBX_STRICT_PRI_PG)
+			/* Joined entry inherits the strict priority rules */
+			pg_help_data->data[entry_joined].pg =
+							DCBX_STRICT_PRI_PG;
+		else {
+			/* Entries can be joined - join their BW */
+			pg_joined = DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_joined].pg) +
+				    DCBX_PG_BW_GET(ets->pg_bw_tbl,
+					pg_help_data->data[entry_removed].pg);
+
+			DCBX_PG_BW_SET(ets->pg_bw_tbl,
+				pg_help_data->data[entry_joined].pg, pg_joined);
+		}
+		/* Joined the entries */
+		pg_help_data->num_of_pg--;
+	}
+
+	return 0;
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+			      struct bnx2x		*bp,
+			      struct pg_help_data	*pg_help_data,
+			      struct dcbx_ets_feature	*ets,
+			      struct cos_help_data	*cos_data,
+			      u32			*pg_pri_orginal_spread,
+			      u32			pri_join_mask,
+			      u8			num_of_dif_pri)
+{
+	u8 i = 0;
+	u32 pri_tested = 0;
+	u8 entry = 0;
+	u8 pg_entry = 0;
+	bool b_found_strict = false;
+	u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
+
+	cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
+	/* If there are both pauseable and non-pauseable priorities,
+	 * the pauseable priorities go to the first queue and the
+	 * non-pauseable priorities go to the second queue.
+	 */
+	if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
+		bnx2x_dcbx_separate_pauseable_from_non(bp,
+				cos_data, pg_pri_orginal_spread, ets);
+	else {
+		/* If two BW-limited PG-s were combined to one queue,
+		 * the BW is their sum.
+		 *
+		 * If there are only pauseable priorities or only non-pauseable,
+		 * and there are both BW-limited and non-BW-limited PG-s,
+		 * the BW-limited PG/s go to one queue and the non-BW-limited
+		 * PG/s go to the second queue.
+		 *
+		 * If there are only pauseable priorities or only non-pauseable
+		 * and all are BW limited, then	two priorities go to the first
+		 * queue and one priority goes to the second queue.
+		 *
+		 * We will join these two cases:
+		 * if one is BW limited it will go to the second queue,
+		 * otherwise the last priority will get it.
+		 */
+
+		cos_data->data[0].pausable = cos_data->data[1].pausable =
+			IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
+
+		for (i = 0 ; i < num_of_pri; i++) {
+			pri_tested = 1 << bp->dcbx_port_params.
+				app.traffic_type_priority[i];
+			pg_entry = (u8)pg_pri_orginal_spread[bp->
+				dcbx_port_params.app.traffic_type_priority[i]];
+
+			if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+				entry = 0;
+
+				if (i == (num_of_pri-1) &&
+				    false == b_found_strict)
+					/* last entry will be handled separately;
+					 * if no priority is strict then the last
+					 * entry goes to the last queue. */
+					entry = 1;
+				cos_data->data[entry].pri_join_mask |=
+								pri_tested;
+				bnx2x_dcbx_add_to_cos_bw(bp,
+					&cos_data->data[entry],
+					DCBX_PG_BW_GET(ets->pg_bw_tbl,
+						       pg_entry));
+			} else {
+				b_found_strict = true;
+				cos_data->data[1].pri_join_mask |= pri_tested;
+				/* If we join a group and one member is strict
+				 * then strict priority rules */
+				cos_data->data[1].strict =
+					BNX2X_DCBX_STRICT_COS_HIGHEST;
+			}
+		}
+	}
+}
+
+static void bnx2x_dcbx_2cos_limit_cee_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       struct cos_help_data *cos_data,
+				       u32 *pg_pri_orginal_spread,
+				       u32 pri_join_mask,
+				       u8 num_of_dif_pri)
+{
+	/* default E2 settings */
+	cos_data->num_of_cos = DCBX_COS_MAX_NUM_E2;
+
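+	/* E2/E3A0 expose only two COS queues; pick the mapping strategy
+	 * according to the number of negotiated PGs.
+	 */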
+	switch (help_data->num_of_pg) {
+	case 1:
+		bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(
+					       bp,
+					       help_data,
+					       cos_data,
+					       pri_join_mask,
+					       num_of_dif_pri);
+		break;
+	case 2:
+		bnx2x_dcbx_2cos_limit_cee_two_pg_to_cos_params(
+					    bp,
+					    help_data,
+					    ets,
+					    cos_data,
+					    pg_pri_orginal_spread,
+					    pri_join_mask,
+					    num_of_dif_pri);
+		break;
+
+	case 3:
+		bnx2x_dcbx_2cos_limit_cee_three_pg_to_cos_params(
+					      bp,
+					      help_data,
+					      ets,
+					      cos_data,
+					      pg_pri_orginal_spread,
+					      pri_join_mask,
+					      num_of_dif_pri);
+		break;
+	default:
+		BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
+		bnx2x_dcbx_ets_disabled_entry_data(bp,
+						   cos_data, pri_join_mask);
+	}
+}
+
+static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
+					struct cos_help_data *cos_data,
+					u8 entry,
+					u8 num_spread_of_entries,
+					u8 strict_app_pris)
+{
+	u8 strict_pri = BNX2X_DCBX_STRICT_COS_HIGHEST;
+	u8 num_of_app_pri = MAX_PFC_PRIORITIES;
+	u8 app_pri_bit = 0;
+
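+	/* Walk the strict priorities from highest to lowest, giving each
+	 * its own COS entry with a descending strict arbitration level;
+	 * the last entry absorbs all remaining priorities.
+	 */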
+	while (num_spread_of_entries && num_of_app_pri > 0) {
+		app_pri_bit = 1 << (num_of_app_pri - 1);
+		if (app_pri_bit & strict_app_pris) {
+			struct cos_entry_help_data *data = &cos_data->
+								data[entry];
+			num_spread_of_entries--;
+			if (num_spread_of_entries == 0) {
+				/* last entry needed - take all remaining priorities */
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = strict_app_pris;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			} else {
+				strict_app_pris &= ~app_pri_bit;
+
+				data->cos_bw = DCBX_INVALID_COS_BW;
+				data->strict = strict_pri;
+				data->pri_join_mask = app_pri_bit;
+				data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+							data->pri_join_mask);
+			}
+
+			strict_pri =
+			    BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(strict_pri);
+			entry++;
+		}
+
+		num_of_app_pri--;
+	}
+
+	if (num_spread_of_entries)
+		return -EINVAL;
+
+	return 0;
+}
+
+static u8 bnx2x_dcbx_cee_fill_strict_pri(struct bnx2x *bp,
+					 struct cos_help_data *cos_data,
+					 u8 entry,
+					 u8 num_spread_of_entries,
+					 u8 strict_app_pris)
+{
+
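+	/* If the strict priorities cannot be spread over the requested
+	 * number of entries, fall back to a single strict COS entry that
+	 * covers all of them.
+	 */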
+	if (bnx2x_dcbx_spread_strict_pri(bp, cos_data, entry,
+					 num_spread_of_entries,
+					 strict_app_pris)) {
+		struct cos_entry_help_data *data = &cos_data->
+						    data[entry];
+		/* Fill BW entry */
+		data->cos_bw = DCBX_INVALID_COS_BW;
+		data->strict = BNX2X_DCBX_STRICT_COS_HIGHEST;
+		data->pri_join_mask = strict_app_pris;
+		data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+				 data->pri_join_mask);
+		return 1;
+	}
+
+	return num_spread_of_entries;
+}
+
+static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
+					   struct pg_help_data *help_data,
+					   struct dcbx_ets_feature *ets,
+					   struct cos_help_data *cos_data,
+					   u32 pri_join_mask)
+{
+	u8 need_num_of_entries = 0;
+	u8 i = 0;
+	u8 entry = 0;
+
+	/*
+	 * if the number of requested PG-s in CEE is greater than 3
+	 * then the results are undefined since this violates
+	 * the standard.
+	 */
+	if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
+		if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
+					DCBX_COS_MAX_NUM_E3B0)) {
+			BNX2X_ERR("Unable to reduce the number of PGs - "
+				  "we will disable ETS\n");
+			bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
+							   pri_join_mask);
+			return;
+		}
+	}
+
+	for (i = 0 ; i < help_data->num_of_pg; i++) {
+		struct pg_entry_help_data *pg =  &help_data->data[i];
+		if (pg->pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
+			struct cos_entry_help_data *data = &cos_data->
+							    data[entry];
+			/* Fill BW entry */
+			data->cos_bw = DCBX_PG_BW_GET(ets->pg_bw_tbl, pg->pg);
+			data->strict = BNX2X_DCBX_STRICT_INVALID;
+			data->pri_join_mask = pg->pg_priority;
+			data->pausable = DCBX_IS_PFC_PRI_SOME_PAUSE(bp,
+						data->pri_join_mask);
+
+			entry++;
+		} else {
+			need_num_of_entries =  min_t(u8,
+				(u8)pg->num_of_dif_pri,
+				(u8)DCBX_COS_MAX_NUM_E3B0 -
+						 help_data->num_of_pg + 1);
+			/*
+			 * If there are still VOQ-s which have no associated PG,
+			 * then associate these VOQ-s to PG15. These PG-s will
+			 * be used for SP between priorities on PG15.
+			 */
+			entry += bnx2x_dcbx_cee_fill_strict_pri(bp, cos_data,
+				entry, need_num_of_entries, pg->pg_priority);
+		}
+	}
+
+	/* the entry will represent the number of COSes used */
+	cos_data->num_of_cos = entry;
+}
+
+static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
+				       struct pg_help_data *help_data,
+				       struct dcbx_ets_feature *ets,
+				       u32 *pg_pri_orginal_spread)
+{
+	struct cos_help_data cos_data;
+	u8 i = 0;
+	u32 pri_join_mask = 0;
+	u8 num_of_dif_pri = 0;
+
+	memset(&cos_data, 0, sizeof(cos_data));
+
+	/* Validate the pg value */
+	for (i = 0; i < help_data->num_of_pg ; i++) {
+		if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
+		    DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
+			BNX2X_ERR("Invalid pg[%d] data %x\n", i,
+				  help_data->data[i].pg);
+		pri_join_mask   |=  help_data->data[i].pg_priority;
+		num_of_dif_pri  += help_data->data[i].num_of_dif_pri;
+	}
+
+	/* defaults */
+	cos_data.num_of_cos = 1;
+	for (i = 0; i < ARRAY_SIZE(cos_data.data); i++) {
+		cos_data.data[i].pri_join_mask = 0;
+		cos_data.data[i].pausable = false;
+		cos_data.data[i].strict = BNX2X_DCBX_STRICT_INVALID;
+		cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
+	}
+
+	if (CHIP_IS_E3B0(bp))
+		bnx2x_dcbx_cee_fill_cos_params(bp, help_data, ets,
+					       &cos_data, pri_join_mask);
+	else /* E2 + E3A0 */
+		bnx2x_dcbx_2cos_limit_cee_fill_cos_params(bp,
+							  help_data, ets,
+							  &cos_data,
+							  pg_pri_orginal_spread,
+							  pri_join_mask,
+							  num_of_dif_pri);
+
+	for (i = 0; i < cos_data.num_of_cos ; i++) {
+		struct bnx2x_dcbx_cos_params *p =
+			&bp->dcbx_port_params.ets.cos_params[i];
+
+		p->strict = cos_data.data[i].strict;
+		p->bw_tbl = cos_data.data[i].cos_bw;
+		p->pri_bitmask = cos_data.data[i].pri_join_mask;
+		p->pauseable = cos_data.data[i].pausable;
+
+		/* sanity */
+		if (p->bw_tbl != DCBX_INVALID_COS_BW ||
+		    p->strict != BNX2X_DCBX_STRICT_INVALID) {
+			if (p->pri_bitmask == 0)
+				BNX2X_ERR("Invalid pri_bitmask for %d\n", i);
+
+			if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp)) {
+
+				if (p->pauseable &&
+				    DCBX_PFC_PRI_GET_NON_PAUSE(bp,
+						p->pri_bitmask) != 0)
+					BNX2X_ERR("Inconsistent config for "
+						  "pausable COS %d\n", i);
+
+				if (!p->pauseable &&
+				    DCBX_PFC_PRI_GET_PAUSE(bp,
+						p->pri_bitmask) != 0)
+					BNX2X_ERR("Inconsistent config for "
+						  "nonpausable COS %d\n", i);
+			}
+		}
+
+		if (p->pauseable)
+			DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
+				  i, cos_data.data[i].pri_join_mask);
+		else
+			DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
+					  "0x%x\n",
+				  i, cos_data.data[i].pri_join_mask);
+	}
+
+	bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
+}
+
+static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
+				u32 *set_configuration_ets_pg,
+				u32 *pri_pg_tbl)
+{
+	int i;
+
+	for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
+		set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
+
+		DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
+		   i, set_configuration_ets_pg[i]);
+	}
+}
+
+static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
+				 struct bnx2x_func_tx_start_params *pfc_fw_cfg)
+{
+	u16 pri_bit = 0;
+	u8 cos = 0, pri = 0;
+	struct priority_cos *tt2cos;
+	u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
+
+	memset(pfc_fw_cfg, 0, sizeof(*pfc_fw_cfg));
+
+	/* to disable DCB - the structure must be zeroed */
+	if (bp->dcbx_error & DCBX_REMOTE_MIB_ERROR)
+		return;
+
+	/* shortcut */
+	tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
+
+	/* Fw version should be incremented each update */
+	pfc_fw_cfg->dcb_version = ++bp->dcb_version;
+	pfc_fw_cfg->dcb_enabled = 1;
+
+	/* Fill priority parameters */
+	for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
+		tt2cos[pri].priority = ttp[pri];
+		pri_bit = 1 << tt2cos[pri].priority;
+
+		/* Fill COS parameters based on COS calculated to
+		 * make it more general for future use */
+		for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
+			if (bp->dcbx_port_params.ets.cos_params[cos].
+						pri_bitmask & pri_bit)
+					tt2cos[pri].cos = cos;
+	}
+
+	/* we never want the FW to add a 0 vlan tag */
+	pfc_fw_cfg->dont_add_pri_0_en = 1;
+
+	bnx2x_dcbx_print_cos_params(bp,	pfc_fw_cfg);
+}
+
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
+{
+	/* if we need to synchronize DCBX result from prev PMF
+	 * read it from shmem and update bp accordingly
+	 */
+	if (SHMEM2_HAS(bp, drv_flags) &&
+	   GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+		/* Read neg results if dcbx is in the FW */
+		if (bnx2x_dcbx_read_shmem_neg_results(bp))
+			return;
+
+		bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					  bp->dcbx_error);
+		bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
+					 bp->dcbx_error);
+	}
+}
+
+/* DCB netlink */
+#ifdef BCM_DCBNL
+
+#define BNX2X_DCBX_CAPS		(DCB_CAP_DCBX_LLD_MANAGED | \
+				DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
+
+static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
+{
+	/* validate dcbnl call that may change HW state:
+	 * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
+	 */
+	return bp->dcb_state && bp->dcbx_mode_uset;
+}
+
+static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
+	return bp->dcb_state;
+}
+
+static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+	bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
+	return 0;
+}
+
+static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
+					 u8 *perm_addr)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
+
+	/* first the HW mac address */
+	memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+
+#ifdef BCM_CNIC
+	/* second SAN address */
+	memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
+#endif
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
+					u8 prio_type, u8 pgid, u8 bw_pct,
+					u8 up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
+	if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+		return;
+
+	/*
+	 * bw_pct ignored -	bandwidth percentage division between user
+	 *			priorities within the same group is not
+	 *			standard and hence not supported
+	 *
+	 * prio_type ignored -	priority levels within the same group are not
+	 *			standard and hence are not supported. According
+	 *			to the standard pgid 15 is dedicated to strict
+	 *			priority traffic (on the port level).
+	 *
+	 * up_map ignored
+	 */
+
+	bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
+	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
+					 int pgid, u8 bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
+
+	if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+		return;
+
+	bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
+	bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
+					u8 prio_type, u8 pgid, u8 bw_pct,
+					u8 up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
+					 int pgid, u8 bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
+					u8 *prio_type, u8 *pgid, u8 *bw_pct,
+					u8 *up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+	/*
+	 * bw_pct ignored -	bandwidth percentage division between user
+	 *			priorities within the same group is not
+	 *			standard and hence not supported
+	 *
+	 * prio_type ignored -	priority levels within the same group are not
+	 *			standard and hence are not supported. According
+	 *			to the standard pgid 15 is dedicated to strict
+	 *			priority traffic (on the port level).
+	 *
+	 * up_map ignored
+	 */
+	*up_map = *bw_pct = *prio_type = *pgid = 0;
+
+	if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
+		return;
+
+	*pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
+					 int pgid, u8 *bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
+
+	*bw_pct = 0;
+
+	if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
+		return;
+
+	*bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
+}
+
+static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
+					u8 *prio_type, u8 *pgid, u8 *bw_pct,
+					u8 *up_map)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+	*prio_type = *pgid = *bw_pct = *up_map = 0;
+}
+
+static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
+					 int pgid, u8 *bw_pct)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+
+	*bw_pct = 0;
+}
+
+static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 setting)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
+
+	if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
+		return;
+
+	bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
+
+	if (setting)
+		bp->dcbx_config_params.admin_pfc_tx_enable = 1;
+}
+
+static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
+				    u8 *setting)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+
+	*setting = 0;
+
+	if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
+		return;
+
+	*setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
+}
+
+static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	int rc = 0;
+
+	DP(NETIF_MSG_LINK, "SET-ALL\n");
+
+	if (!bnx2x_dcbnl_set_valid(bp))
+		return 1;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		netdev_err(bp->dev, "Handling parity error recovery. "
+				"Try again later\n");
+		return 1;
+	}
+	if (netif_running(bp->dev)) {
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
+	}
+	DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
+	if (rc)
+		return 1;
+
+	return 0;
+}
+
+static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	if (bp->dcb_state) {
+		switch (capid) {
+		case DCB_CAP_ATTR_PG:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_PFC:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_UP2TC:
+			*cap = false;
+			break;
+		case DCB_CAP_ATTR_PG_TCS:
+			*cap = 0x80;	/* 8 priorities for PGs */
+			break;
+		case DCB_CAP_ATTR_PFC_TCS:
+			*cap = 0x80;	/* 8 priorities for PFC */
+			break;
+		case DCB_CAP_ATTR_GSP:
+			*cap = true;
+			break;
+		case DCB_CAP_ATTR_BCN:
+			*cap = false;
+			break;
+		case DCB_CAP_ATTR_DCBX:
+			*cap = BNX2X_DCBX_CAPS;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else
+		rval = -EINVAL;
+
+	DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
+	return rval;
+}
+
+static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
+
+	if (bp->dcb_state) {
+		switch (tcid) {
+		case DCB_NUMTCS_ATTR_PG:
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
+			break;
+		case DCB_NUMTCS_ATTR_PFC:
+			*num = CHIP_IS_E3B0(bp) ? DCBX_COS_MAX_NUM_E3B0 :
+						  DCBX_COS_MAX_NUM_E2;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else
+		rval = -EINVAL;
+
+	return rval;
+}
+
+static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
+	return -EINVAL;
+}
+
+static u8  bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+
+	if (!bp->dcb_state)
+		return 0;
+
+	return bp->dcbx_local_feat.pfc.enabled;
+}
+
+static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+
+	if (!bnx2x_dcbnl_set_valid(bp))
+		return;
+
+	bp->dcbx_config_params.admin_pfc_tx_enable =
+	bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
+}
+
+static void bnx2x_admin_app_set_ent(
+	struct bnx2x_admin_priority_app_table *app_ent,
+	u8 idtype, u16 idval, u8 up)
+{
+	app_ent->valid = 1;
+
+	switch (idtype) {
+	case DCB_APP_IDTYPE_ETHTYPE:
+		app_ent->traffic_type = TRAFFIC_TYPE_ETH;
+		break;
+	case DCB_APP_IDTYPE_PORTNUM:
+		app_ent->traffic_type = TRAFFIC_TYPE_PORT;
+		break;
+	default:
+		break; /* never gets here */
+	}
+	app_ent->app_id = idval;
+	app_ent->priority = up;
+}
+
+static bool bnx2x_admin_app_is_equal(
+	struct bnx2x_admin_priority_app_table *app_ent,
+	u8 idtype, u16 idval)
+{
+	if (!app_ent->valid)
+		return false;
+
+	switch (idtype) {
+	case DCB_APP_IDTYPE_ETHTYPE:
+		if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
+			return false;
+		break;
+	case DCB_APP_IDTYPE_PORTNUM:
+		if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
+			return false;
+		break;
+	default:
+		return false;
+	}
+	if (app_ent->app_id != idval)
+		return false;
+
+	return true;
+}
+
+static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
+{
+	int i, ff;
+
+	/* iterate over the app entries looking for idtype and idval */
+	for (i = 0, ff = -1; i < 4; i++) {
+		struct bnx2x_admin_priority_app_table *app_ent =
+			&bp->dcbx_config_params.admin_priority_app_table[i];
+		if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
+			break;
+
+		if (ff < 0 && !app_ent->valid)
+			ff = i;
+	}
+	if (i < 4)
+		/* found - overwrite the user priority */
+		bp->dcbx_config_params.
+			admin_priority_app_table[i].priority = up;
+	else if (ff >= 0)
+		/* not found - use the first free entry */
+		bnx2x_admin_app_set_ent(
+			&bp->dcbx_config_params.admin_priority_app_table[ff],
+			idtype, idval, up);
+	else
+		/* app table is full */
+		return -EBUSY;
+
+	/* priority configured; if non-zero make sure the feature is enabled */
+	if (up)
+		bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
+
+	return 0;
+}
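+/* e.g. (illustrative) bnx2x_set_admin_app_up(bp, DCB_APP_IDTYPE_ETHTYPE,
+ * 0x8906, 3) would pin FCoE (ethertype 0x8906) to user priority 3
+ */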
+
+static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+				 u16 idval, u8 up)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
+	   idtype, idval, up);
+
+	if (!bnx2x_dcbnl_set_valid(bp))
+		return -EINVAL;
+
+	/* verify idtype */
+	switch (idtype) {
+	case DCB_APP_IDTYPE_ETHTYPE:
+	case DCB_APP_IDTYPE_PORTNUM:
+		break;
+	default:
+		return -EINVAL;
+	}
+	return bnx2x_set_admin_app_up(bp, idtype, idval, up);
+}
+
+static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 state;
+
+	state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
+
+	if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
+		state |= DCB_CAP_DCBX_STATIC;
+
+	return state;
+}
+
+static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	DP(NETIF_MSG_LINK, "state = %02x\n", state);
+
+	/* set dcbx mode */
+
+	if ((state & BNX2X_DCBX_CAPS) != state) {
+		BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
+			  "capabilities\n", state);
+		return 1;
+	}
+
+	if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
+		BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
+		return 1;
+	}
+
+	if (state & DCB_CAP_DCBX_STATIC)
+		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
+	else
+		bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
+
+	bp->dcbx_mode_uset = true;
+	return 0;
+}
+
+static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
+				  u8 *flags)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	DP(NETIF_MSG_LINK, "featid %d\n", featid);
+
+	if (bp->dcb_state) {
+		*flags = 0;
+		switch (featid) {
+		case DCB_FEATCFG_ATTR_PG:
+			if (bp->dcbx_local_feat.ets.enabled)
+				*flags |= DCB_FEATCFG_ENABLE;
+			if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
+				*flags |= DCB_FEATCFG_ERROR;
+			break;
+		case DCB_FEATCFG_ATTR_PFC:
+			if (bp->dcbx_local_feat.pfc.enabled)
+				*flags |= DCB_FEATCFG_ENABLE;
+			if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
+			    DCBX_LOCAL_PFC_MISMATCH))
+				*flags |= DCB_FEATCFG_ERROR;
+			break;
+		case DCB_FEATCFG_ATTR_APP:
+			if (bp->dcbx_local_feat.app.enabled)
+				*flags |= DCB_FEATCFG_ENABLE;
+			if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
+			    DCBX_LOCAL_APP_MISMATCH))
+				*flags |= DCB_FEATCFG_ERROR;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else
+		rval = -EINVAL;
+
+	return rval;
+}
+
+static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
+				  u8 flags)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u8 rval = 0;
+
+	DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
+
+	/* ignore the 'advertise' flag */
+	if (bnx2x_dcbnl_set_valid(bp)) {
+		switch (featid) {
+		case DCB_FEATCFG_ATTR_PG:
+			bp->dcbx_config_params.admin_ets_enable =
+				flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+			bp->dcbx_config_params.admin_ets_willing =
+				flags & DCB_FEATCFG_WILLING ? 1 : 0;
+			break;
+		case DCB_FEATCFG_ATTR_PFC:
+			bp->dcbx_config_params.admin_pfc_enable =
+				flags & DCB_FEATCFG_ENABLE ? 1 : 0;
+			bp->dcbx_config_params.admin_pfc_willing =
+				flags & DCB_FEATCFG_WILLING ? 1 : 0;
+			break;
+		case DCB_FEATCFG_ATTR_APP:
+			/* ignore enable, always enabled */
+			bp->dcbx_config_params.admin_app_priority_willing =
+				flags & DCB_FEATCFG_WILLING ? 1 : 0;
+			break;
+		default:
+			rval = -EINVAL;
+			break;
+		}
+	} else
+		rval = -EINVAL;
+
+	return rval;
+}
+
+static int bnx2x_peer_appinfo(struct net_device *netdev,
+			      struct dcb_peer_app_info *info, u16 *app_count)
+{
+	int i;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(NETIF_MSG_LINK, "APP-INFO\n");
+
+	info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
+	info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
+	*app_count = 0;
+
+	for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
+		if (bp->dcbx_remote_feat.app.app_pri_tbl[i].appBitfield &
+		    DCBX_APP_ENTRY_VALID)
+			(*app_count)++;
+	return 0;
+}
+
+static int bnx2x_peer_apptable(struct net_device *netdev,
+			       struct dcb_app *table)
+{
+	int i, j;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	DP(NETIF_MSG_LINK, "APP-TABLE\n");
+
+	for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+		struct dcbx_app_priority_entry *ent =
+			&bp->dcbx_remote_feat.app.app_pri_tbl[i];
+
+		if (ent->appBitfield & DCBX_APP_ENTRY_VALID) {
+			table[j].selector = bnx2x_dcbx_dcbnl_app_idtype(ent);
+			table[j].priority = bnx2x_dcbx_dcbnl_app_up(ent);
+			table[j++].protocol = ent->app_id;
+		}
+	}
+	return 0;
+}
+
+static int bnx2x_cee_peer_getpg(struct net_device *netdev, struct cee_pg *pg)
+{
+	int i;
+	struct bnx2x *bp = netdev_priv(netdev);
+
+	pg->willing = (bp->dcbx_remote_flags & DCBX_ETS_REM_WILLING) ?: 0;
+
+	for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
+		pg->pg_bw[i] =
+			DCBX_PG_BW_GET(bp->dcbx_remote_feat.ets.pg_bw_tbl, i);
+		pg->prio_pg[i] =
+			DCBX_PRI_PG_GET(bp->dcbx_remote_feat.ets.pri_pg_tbl, i);
+	}
+	return 0;
+}
+
+static int bnx2x_cee_peer_getpfc(struct net_device *netdev,
+				 struct cee_pfc *pfc)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	pfc->tcs_supported = bp->dcbx_remote_feat.pfc.pfc_caps;
+	pfc->pfc_en = bp->dcbx_remote_feat.pfc.pri_en_bitmap;
+	return 0;
+}
+
+const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
+	.getstate		= bnx2x_dcbnl_get_state,
+	.setstate		= bnx2x_dcbnl_set_state,
+	.getpermhwaddr		= bnx2x_dcbnl_get_perm_hw_addr,
+	.setpgtccfgtx		= bnx2x_dcbnl_set_pg_tccfg_tx,
+	.setpgbwgcfgtx		= bnx2x_dcbnl_set_pg_bwgcfg_tx,
+	.setpgtccfgrx		= bnx2x_dcbnl_set_pg_tccfg_rx,
+	.setpgbwgcfgrx		= bnx2x_dcbnl_set_pg_bwgcfg_rx,
+	.getpgtccfgtx		= bnx2x_dcbnl_get_pg_tccfg_tx,
+	.getpgbwgcfgtx		= bnx2x_dcbnl_get_pg_bwgcfg_tx,
+	.getpgtccfgrx		= bnx2x_dcbnl_get_pg_tccfg_rx,
+	.getpgbwgcfgrx		= bnx2x_dcbnl_get_pg_bwgcfg_rx,
+	.setpfccfg		= bnx2x_dcbnl_set_pfc_cfg,
+	.getpfccfg		= bnx2x_dcbnl_get_pfc_cfg,
+	.setall			= bnx2x_dcbnl_set_all,
+	.getcap			= bnx2x_dcbnl_get_cap,
+	.getnumtcs		= bnx2x_dcbnl_get_numtcs,
+	.setnumtcs		= bnx2x_dcbnl_set_numtcs,
+	.getpfcstate		= bnx2x_dcbnl_get_pfc_state,
+	.setpfcstate		= bnx2x_dcbnl_set_pfc_state,
+	.setapp			= bnx2x_dcbnl_set_app_up,
+	.getdcbx		= bnx2x_dcbnl_get_dcbx,
+	.setdcbx		= bnx2x_dcbnl_set_dcbx,
+	.getfeatcfg		= bnx2x_dcbnl_get_featcfg,
+	.setfeatcfg		= bnx2x_dcbnl_set_featcfg,
+	.peer_getappinfo	= bnx2x_peer_appinfo,
+	.peer_getapptable	= bnx2x_peer_apptable,
+	.cee_peer_getpg		= bnx2x_cee_peer_getpg,
+	.cee_peer_getpfc	= bnx2x_cee_peer_getpfc,
+};
+
+#endif /* BCM_DCBNL */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
new file mode 100644
index 0000000..2c6a3bc
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,203 @@
+/* bnx2x_dcb.h: Broadcom Everest network driver.
+ *
+ * Copyright 2009-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Dmitry Kravkov
+ *
+ */
+#ifndef BNX2X_DCB_H
+#define BNX2X_DCB_H
+
+#include "bnx2x_hsi.h"
+
+#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
+struct bnx2x_dcbx_app_params {
+	u32 enabled;
+	u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+};
+
+#define DCBX_COS_MAX_NUM_E2	DCBX_E2E3_MAX_NUM_COS
+/* bnx2x currently limits the number of supported COSes to 3 (extendable to 6) */
+#define BNX2X_MAX_COS_SUPPORT	3
+#define DCBX_COS_MAX_NUM_E3B0	BNX2X_MAX_COS_SUPPORT
+#define DCBX_COS_MAX_NUM	BNX2X_MAX_COS_SUPPORT
+
+struct bnx2x_dcbx_cos_params {
+	u32	bw_tbl;
+	u32	pri_bitmask;
+	/*
+	 * strict priority: valid values are 0..5; 0 is highest priority.
+	 * There can't be two COSes with the same priority.
+	 */
+	u8	strict;
+#define BNX2X_DCBX_STRICT_INVALID			DCBX_COS_MAX_NUM
+#define BNX2X_DCBX_STRICT_COS_HIGHEST			0
+#define BNX2X_DCBX_STRICT_COS_NEXT_LOWER_PRI(sp)	((sp) + 1)
+	u8	pauseable;
+};
+
+struct bnx2x_dcbx_pg_params {
+	u32 enabled;
+	u8 num_of_cos; /* valid COS entries */
+	struct bnx2x_dcbx_cos_params	cos_params[DCBX_COS_MAX_NUM];
+};
+
+struct bnx2x_dcbx_pfc_params {
+	u32 enabled;
+	u32 priority_non_pauseable_mask;
+};
+
+struct bnx2x_dcbx_port_params {
+	struct bnx2x_dcbx_pfc_params pfc;
+	struct bnx2x_dcbx_pg_params  ets;
+	struct bnx2x_dcbx_app_params app;
+};
+
+#define BNX2X_DCBX_CONFIG_INV_VALUE			(0xFFFFFFFF)
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE		0
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE		1
+#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID	(BNX2X_DCBX_CONFIG_INV_VALUE)
+#define BNX2X_IS_ETS_ENABLED(bp) ((bp)->dcb_state == BNX2X_DCB_STATE_ON &&\
+				  (bp)->dcbx_port_params.ets.enabled)
+
+struct bnx2x_config_lldp_params {
+	u32 overwrite_settings;
+	u32 msg_tx_hold;
+	u32 msg_fast_tx;
+	u32 tx_credit_max;
+	u32 msg_tx_interval;
+	u32 tx_fast;
+};
+
+struct bnx2x_admin_priority_app_table {
+		u32 valid;
+		u32 priority;
+#define INVALID_TRAFFIC_TYPE_PRIORITY	(0xFFFFFFFF)
+		u32 traffic_type;
+#define TRAFFIC_TYPE_ETH		0
+#define TRAFFIC_TYPE_PORT		1
+		u32 app_id;
+};
+
+struct bnx2x_config_dcbx_params {
+	u32 overwrite_settings;
+	u32 admin_dcbx_version;
+	u32 admin_ets_enable;
+	u32 admin_pfc_enable;
+	u32 admin_tc_supported_tx_enable;
+	u32 admin_ets_configuration_tx_enable;
+	u32 admin_ets_recommendation_tx_enable;
+	u32 admin_pfc_tx_enable;
+	u32 admin_application_priority_tx_enable;
+	u32 admin_ets_willing;
+	u32 admin_ets_reco_valid;
+	u32 admin_pfc_willing;
+	u32 admin_app_priority_willing;
+	u32 admin_configuration_bw_precentage[8];
+	u32 admin_configuration_ets_pg[8];
+	u32 admin_recommendation_bw_precentage[8];
+	u32 admin_recommendation_ets_pg[8];
+	u32 admin_pfc_bitmap;
+	struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+	u32 admin_default_priority;
+};
+
+#define GET_FLAGS(flags, bits)		((flags) & (bits))
+#define SET_FLAGS(flags, bits)		((flags) |= (bits))
+#define RESET_FLAGS(flags, bits)	((flags) &= ~(bits))
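+/* e.g. SET_FLAGS(drv_flags, DRV_FLAGS_DCB_CONFIGURED) turns the
+ * DCB-configured bit on; GET_FLAGS() tests it, RESET_FLAGS() clears it
+ */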
+
+enum {
+	DCBX_READ_LOCAL_MIB,
+	DCBX_READ_REMOTE_MIB
+};
+
+#define ETH_TYPE_FCOE		(0x8906)
+#define TCP_PORT_ISCSI		(0xCBC)
+
+#define PFC_VALUE_FRAME_SIZE				(512)
+#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed)  \
+				((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
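+/* e.g. at 10G (mega_speed == 10000) one 512-bit quantum lasts
+ * (1000 * 512) / 10000 = 51 ns (integer division)
+ */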
+
+#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD			130
+#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD			170
+
+struct cos_entry_help_data {
+	u32			pri_join_mask;
+	u32			cos_bw;
+	u8			strict;
+	bool			pausable;
+};
+
+struct cos_help_data {
+	struct cos_entry_help_data	data[DCBX_COS_MAX_NUM];
+	u8				num_of_cos;
+};
+
+#define DCBX_ILLEGAL_PG				(0xFF)
+#define DCBX_PFC_PRI_MASK			(0xFF)
+#define DCBX_STRICT_PRIORITY			(15)
+#define DCBX_INVALID_COS_BW			(0xFFFFFFFF)
+#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp)		\
+			((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
+#define DCBX_PFC_PRI_PAUSE_MASK(bp)		\
+					((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
+#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri)	\
+				((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
+#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri)	\
+			(DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
+#define DCBX_IS_PFC_PRI_SOME_PAUSE(bp, pg_pri)	\
+			(0 != DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri))
+#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri)	\
+			(pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
+			((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
+#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri)	\
+			(!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
+			 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
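+/* worked example (illustrative): with priority_non_pauseable_mask == 0x0f,
+ * DCBX_PFC_PRI_PAUSE_MASK(bp) == 0xf0, so pg_pri == 0x30 is "only pause",
+ * pg_pri == 0x05 is "only non-pause" and pg_pri == 0x11 is a "mix"
+ */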
+
+struct pg_entry_help_data {
+	u8	num_of_dif_pri;
+	u8	pg;
+	u32	pg_priority;
+};
+
+struct pg_help_data {
+	struct pg_entry_help_data	data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
+	u8				num_of_pg;
+};
+
+/* forward DCB/PFC related declarations */
+struct bnx2x;
+void bnx2x_dcbx_update(struct work_struct *work);
+void bnx2x_dcbx_init_params(struct bnx2x *bp);
+void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
+
+enum {
+	BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
+	BNX2X_DCBX_STATE_TX_PAUSED,
+	BNX2X_DCBX_STATE_TX_RELEASED
+};
+
+void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
+void bnx2x_dcbx_pmf_update(struct bnx2x *bp);
+/* DCB netlink */
+#ifdef BCM_DCBNL
+extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
+int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall);
+#endif /* BCM_DCBNL */
+
+#endif /* BNX2X_DCB_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
new file mode 100644
index 0000000..b983825
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -0,0 +1,1156 @@
+/* bnx2x_dump.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ */
+
+/* This struct holds a signature to ensure that the dump returned from the
+ * driver matches the metadata file inserted into grc_dump.tcl.
+ * The signature consists of a time stamp, the diag version and the
+ * grc_dump version.
+ */
+
+#ifndef BNX2X_DUMP_H
+#define BNX2X_DUMP_H
+
+/* definitions */
+#define XSTORM_WAITP_ADDR	0x2b8a80
+#define TSTORM_WAITP_ADDR	0x1b8a80
+#define USTORM_WAITP_ADDR	0x338a80
+#define CSTORM_WAITP_ADDR	0x238a80
+#define TSTORM_CAM_MODE	0x1B1440
+
+#define MAX_TIMER_PENDING	200
+#define TIMER_SCAN_DONT_CARE	0xFF
+#define RI_E1				0x1
+#define RI_E1H				0x2
+#define RI_E2				0x4
+#define RI_E3				0x8
+#define RI_E3B0				0x10
+#define RI_ONLINE			0x100
+#define RI_OFFLINE			0x0
+#define RI_PATH0_DUMP			0x200
+#define RI_PATH1_DUMP			0x400
+
+#define RI_E1_ONLINE		(RI_E1 | RI_ONLINE)
+#define RI_E1H_ONLINE		(RI_E1H | RI_ONLINE)
+#define RI_E1E1H_ONLINE		(RI_E1 | RI_E1H | RI_ONLINE)
+#define RI_E2_ONLINE		(RI_E2 | RI_ONLINE)
+#define RI_E1E2_ONLINE		(RI_E1 | RI_E2 | RI_ONLINE)
+#define RI_E1HE2_ONLINE		(RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E1E1HE2_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
+#define RI_E3_ONLINE		(RI_E3 | RI_ONLINE)
+#define RI_E1E3_ONLINE		(RI_E1 | RI_E3 | RI_ONLINE)
+#define RI_E1HE3_ONLINE		(RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE3_ONLINE	(RI_E1 | RI_E1H | RI_E3 | RI_ONLINE)
+#define RI_E2E3_ONLINE		(RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E2E3_ONLINE	(RI_E1 | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1HE2E3_ONLINE	(RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E1E1HE2E3_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_ONLINE)
+#define RI_E3B0_ONLINE		(RI_E3B0 | RI_ONLINE)
+#define RI_E1E3B0_ONLINE	(RI_E1 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3B0_ONLINE	(RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3B0_ONLINE	(RI_E1 | RI_E1H | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3B0_ONLINE	(RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3B0_ONLINE	(RI_E1 | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3B0_ONLINE	(RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3B0_ONLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_ONLINE)
+#define RI_E3E3B0_ONLINE	(RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E3E3B0_ONLINE	(RI_E1 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE3E3B0_ONLINE	(RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE3E3B0_ONLINE	(RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E2E3E3B0_ONLINE	(RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E2E3E3B0_ONLINE	(RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1HE2E3E3B0_ONLINE	(RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1E1HE2E3E3B0_ONLINE	\
+	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_ONLINE)
+#define RI_E1_OFFLINE		(RI_E1 | RI_OFFLINE)
+#define RI_E1H_OFFLINE		(RI_E1H | RI_OFFLINE)
+#define RI_E1E1H_OFFLINE	(RI_E1 | RI_E1H | RI_OFFLINE)
+#define RI_E2_OFFLINE		(RI_E2 | RI_OFFLINE)
+#define RI_E1E2_OFFLINE		(RI_E1 | RI_E2 | RI_OFFLINE)
+#define RI_E1HE2_OFFLINE	(RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E1E1HE2_OFFLINE	(RI_E1 | RI_E1H | RI_E2 | RI_OFFLINE)
+#define RI_E3_OFFLINE		(RI_E3 | RI_OFFLINE)
+#define RI_E1E3_OFFLINE		(RI_E1 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE3_OFFLINE	(RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE3_OFFLINE	(RI_E1 | RI_E1H | RI_E3 | RI_OFFLINE)
+#define RI_E2E3_OFFLINE		(RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E2E3_OFFLINE	(RI_E1 | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1HE2E3_OFFLINE	(RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E1E1HE2E3_OFFLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_OFFLINE)
+#define RI_E3B0_OFFLINE		(RI_E3B0 | RI_OFFLINE)
+#define RI_E1E3B0_OFFLINE	(RI_E1 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE3B0_OFFLINE	(RI_E1H | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE3B0_OFFLINE	(RI_E1 | RI_E1H | RI_E3B0 | RI_OFFLINE)
+#define RI_E2E3B0_OFFLINE	(RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E2E3B0_OFFLINE	(RI_E1 | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE2E3B0_OFFLINE	(RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE2E3B0_OFFLINE	(RI_E1 | RI_E1H | RI_E2 | RI_E3B0 | RI_OFFLINE)
+#define RI_E3E3B0_OFFLINE	(RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E3E3B0_OFFLINE	(RI_E1 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE3E3B0_OFFLINE	(RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE3E3B0_OFFLINE	(RI_E1 | RI_E1H | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E2E3E3B0_OFFLINE	(RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E2E3E3B0_OFFLINE	(RI_E1 | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1HE2E3E3B0_OFFLINE	(RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_E1E1HE2E3E3B0_OFFLINE	\
+	(RI_E1 | RI_E1H | RI_E2 | RI_E3 | RI_E3B0 | RI_OFFLINE)
+#define RI_ALL_ONLINE		RI_E1E1HE2E3E3B0_ONLINE
+#define RI_ALL_OFFLINE		RI_E1E1HE2E3E3B0_OFFLINE
+
+#define DBG_DMP_TRACE_BUFFER_SIZE	0x800
+#define DBG_DMP_TRACE_BUFFER_OFFSET(shmem0_offset) \
+	((shmem0_offset) - DBG_DMP_TRACE_BUFFER_SIZE)
+
+struct dump_sign {
+	u32 time_stamp;
+	u32 diag_ver;
+	u32 grc_dump_ver;
+};
+
+struct dump_hdr {
+	u32  hdr_size;	/* in dwords, excluding this field */
+	struct dump_sign	dump_sign;
+	u32  xstorm_waitp;
+	u32  tstorm_waitp;
+	u32  ustorm_waitp;
+	u32  cstorm_waitp;
+	u16  info;
+	u8   idle_chk;
+	u8   reserved;
+};
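+/* e.g. a dump producer would set hdr_size to (sizeof(struct dump_hdr) / 4) - 1
+ * (the size in dwords, excluding the hdr_size field itself)
+ */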
+
+struct reg_addr {
+	u32 addr;
+	u32 size;
+	u16 info;
+};
+
+struct wreg_addr {
+	u32 addr;
+	u32 size;
+	u32 read_regs_count;
+	const u32 *read_regs;
+	u16 info;
+};
+
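+/* A GRC dump pass walks reg_addrs[] and copies only the entries whose info
+ * field matches the running chip and online state; an illustrative sketch
+ * (not driver code):
+ *
+ *	for (i = 0; i < ARRAY_SIZE(reg_addrs); i++)
+ *		if ((reg_addrs[i].info & chip_mask) && online)
+ *			for (j = 0; j < reg_addrs[i].size; j++)
+ *				*p++ = REG_RD(bp, reg_addrs[i].addr + j * 4);
+ */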
+static const struct reg_addr reg_addrs[] = {
+	{ 0x2000, 341, RI_ALL_ONLINE },
+	{ 0x2800, 103, RI_ALL_ONLINE },
+	{ 0x3000, 287, RI_ALL_ONLINE },
+	{ 0x3800, 331, RI_ALL_ONLINE },
+	{ 0x8800, 6, RI_ALL_ONLINE },
+	{ 0x8818, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x9000, 147, RI_E2E3E3B0_ONLINE },
+	{ 0x924c, 1, RI_E2_ONLINE },
+	{ 0x9250, 16, RI_E2E3E3B0_ONLINE },
+	{ 0x9400, 33, RI_E2E3E3B0_ONLINE },
+	{ 0x9484, 5, RI_E3E3B0_ONLINE },
+	{ 0xa000, 27, RI_ALL_ONLINE },
+	{ 0xa06c, 1, RI_E1E1H_ONLINE },
+	{ 0xa070, 71, RI_ALL_ONLINE },
+	{ 0xa18c, 4, RI_E1E1H_ONLINE },
+	{ 0xa19c, 62, RI_ALL_ONLINE },
+	{ 0xa294, 2, RI_E1E1H_ONLINE },
+	{ 0xa29c, 2, RI_ALL_ONLINE },
+	{ 0xa2a4, 2, RI_E1E1HE2_ONLINE },
+	{ 0xa2ac, 52, RI_ALL_ONLINE },
+	{ 0xa39c, 7, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa3b8, 2, RI_E3E3B0_ONLINE },
+	{ 0xa3c0, 3, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa3d0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa3d8, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa3e0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa3e8, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa3f0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa3f8, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa400, 40, RI_ALL_ONLINE },
+	{ 0xa4a0, 1, RI_E1E1HE2_ONLINE },
+	{ 0xa4a4, 2, RI_ALL_ONLINE },
+	{ 0xa4ac, 2, RI_E1E1H_ONLINE },
+	{ 0xa4b4, 1, RI_E1E1HE2_ONLINE },
+	{ 0xa4b8, 2, RI_E1E1H_ONLINE },
+	{ 0xa4c0, 3, RI_ALL_ONLINE },
+	{ 0xa4cc, 5, RI_E1E1H_ONLINE },
+	{ 0xa4e0, 3, RI_ALL_ONLINE },
+	{ 0xa4fc, 2, RI_ALL_ONLINE },
+	{ 0xa504, 1, RI_E1E1H_ONLINE },
+	{ 0xa508, 3, RI_ALL_ONLINE },
+	{ 0xa518, 1, RI_ALL_ONLINE },
+	{ 0xa520, 1, RI_ALL_ONLINE },
+	{ 0xa528, 1, RI_ALL_ONLINE },
+	{ 0xa530, 1, RI_ALL_ONLINE },
+	{ 0xa538, 1, RI_ALL_ONLINE },
+	{ 0xa540, 1, RI_ALL_ONLINE },
+	{ 0xa548, 1, RI_E1E1H_ONLINE },
+	{ 0xa550, 1, RI_E1E1H_ONLINE },
+	{ 0xa558, 1, RI_E1E1H_ONLINE },
+	{ 0xa560, 1, RI_E1E1H_ONLINE },
+	{ 0xa568, 1, RI_E1E1H_ONLINE },
+	{ 0xa570, 1, RI_ALL_ONLINE },
+	{ 0xa580, 1, RI_ALL_ONLINE },
+	{ 0xa590, 1, RI_ALL_ONLINE },
+	{ 0xa5a0, 1, RI_E1E1HE2_ONLINE },
+	{ 0xa5c0, 1, RI_ALL_ONLINE },
+	{ 0xa5e0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa5e8, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa5f0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa5f8, 1, RI_E1HE2_ONLINE },
+	{ 0xa5fc, 9, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xa620, 6, RI_E2E3E3B0_ONLINE },
+	{ 0xa638, 20, RI_E2_ONLINE },
+	{ 0xa688, 42, RI_E2E3E3B0_ONLINE },
+	{ 0xa730, 1, RI_E2_ONLINE },
+	{ 0xa734, 2, RI_E2E3E3B0_ONLINE },
+	{ 0xa73c, 4, RI_E2_ONLINE },
+	{ 0xa74c, 5, RI_E2E3E3B0_ONLINE },
+	{ 0xa760, 5, RI_E2_ONLINE },
+	{ 0xa774, 7, RI_E2E3E3B0_ONLINE },
+	{ 0xa790, 15, RI_E2_ONLINE },
+	{ 0xa7cc, 4, RI_E2E3E3B0_ONLINE },
+	{ 0xa7e0, 6, RI_E3E3B0_ONLINE },
+	{ 0xa800, 18, RI_E2_ONLINE },
+	{ 0xa848, 33, RI_E2E3E3B0_ONLINE },
+	{ 0xa8cc, 2, RI_E3E3B0_ONLINE },
+	{ 0xa8d4, 4, RI_E2E3E3B0_ONLINE },
+	{ 0xa8e4, 1, RI_E3E3B0_ONLINE },
+	{ 0xa8e8, 1, RI_E2E3E3B0_ONLINE },
+	{ 0xa8f0, 1, RI_E2E3E3B0_ONLINE },
+	{ 0xa8f8, 30, RI_E3E3B0_ONLINE },
+	{ 0xa974, 73, RI_E3E3B0_ONLINE },
+	{ 0xac30, 1, RI_E3E3B0_ONLINE },
+	{ 0xac40, 1, RI_E3E3B0_ONLINE },
+	{ 0xac50, 1, RI_E3E3B0_ONLINE },
+	{ 0xac60, 1, RI_E3B0_ONLINE },
+	{ 0x10000, 9, RI_ALL_ONLINE },
+	{ 0x10024, 1, RI_E1E1HE2_ONLINE },
+	{ 0x10028, 5, RI_ALL_ONLINE },
+	{ 0x1003c, 6, RI_E1E1HE2_ONLINE },
+	{ 0x10054, 20, RI_ALL_ONLINE },
+	{ 0x100a4, 4, RI_E1E1HE2_ONLINE },
+	{ 0x100b4, 11, RI_ALL_ONLINE },
+	{ 0x100e0, 4, RI_E1E1HE2_ONLINE },
+	{ 0x100f0, 8, RI_ALL_ONLINE },
+	{ 0x10110, 6, RI_E1E1HE2_ONLINE },
+	{ 0x10128, 110, RI_ALL_ONLINE },
+	{ 0x102e0, 4, RI_E1E1HE2_ONLINE },
+	{ 0x102f0, 18, RI_ALL_ONLINE },
+	{ 0x10338, 20, RI_E1E1HE2_ONLINE },
+	{ 0x10388, 10, RI_ALL_ONLINE },
+	{ 0x10400, 6, RI_E1E1HE2_ONLINE },
+	{ 0x10418, 6, RI_ALL_ONLINE },
+	{ 0x10430, 10, RI_E1E1HE2_ONLINE },
+	{ 0x10458, 22, RI_ALL_ONLINE },
+	{ 0x104b0, 12, RI_E1E1HE2_ONLINE },
+	{ 0x104e0, 1, RI_ALL_ONLINE },
+	{ 0x104e8, 2, RI_ALL_ONLINE },
+	{ 0x104f4, 2, RI_ALL_ONLINE },
+	{ 0x10500, 146, RI_ALL_ONLINE },
+	{ 0x10750, 2, RI_E1E1HE2_ONLINE },
+	{ 0x10760, 2, RI_E1E1HE2_ONLINE },
+	{ 0x10770, 2, RI_E1E1HE2_ONLINE },
+	{ 0x10780, 2, RI_E1E1HE2_ONLINE },
+	{ 0x10790, 2, RI_ALL_ONLINE },
+	{ 0x107a0, 2, RI_E1E1HE2_ONLINE },
+	{ 0x107b0, 2, RI_E1E1HE2_ONLINE },
+	{ 0x107c0, 2, RI_E1E1HE2_ONLINE },
+	{ 0x107d0, 2, RI_E1E1HE2_ONLINE },
+	{ 0x107e0, 2, RI_ALL_ONLINE },
+	{ 0x10880, 2, RI_ALL_ONLINE },
+	{ 0x10900, 2, RI_ALL_ONLINE },
+	{ 0x16000, 1, RI_E1HE2_ONLINE },
+	{ 0x16004, 25, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x16070, 8, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x16090, 4, RI_E1HE2E3_ONLINE },
+	{ 0x160a0, 6, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x160c0, 7, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x160dc, 2, RI_E1HE2_ONLINE },
+	{ 0x160e4, 10, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1610c, 2, RI_E1HE2_ONLINE },
+	{ 0x16114, 6, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x16140, 48, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x16204, 5, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x18000, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x18008, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x18010, 35, RI_E2E3E3B0_ONLINE },
+	{ 0x180a4, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x180c0, 9, RI_E2E3E3B0_ONLINE },
+	{ 0x180e4, 1, RI_E2E3_ONLINE },
+	{ 0x180e8, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x180f0, 1, RI_E2E3_ONLINE },
+	{ 0x180f4, 79, RI_E2E3E3B0_ONLINE },
+	{ 0x18230, 1, RI_E2E3_ONLINE },
+	{ 0x18234, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x1823c, 1, RI_E2E3_ONLINE },
+	{ 0x18240, 13, RI_E2E3E3B0_ONLINE },
+	{ 0x18274, 1, RI_E2_ONLINE },
+	{ 0x18278, 81, RI_E2E3E3B0_ONLINE },
+	{ 0x18440, 63, RI_E2E3E3B0_ONLINE },
+	{ 0x18570, 42, RI_E3E3B0_ONLINE },
+	{ 0x18618, 25, RI_E3B0_ONLINE },
+	{ 0x18680, 44, RI_E3B0_ONLINE },
+	{ 0x18748, 12, RI_E3B0_ONLINE },
+	{ 0x18788, 1, RI_E3B0_ONLINE },
+	{ 0x1879c, 6, RI_E3B0_ONLINE },
+	{ 0x187c4, 51, RI_E3B0_ONLINE },
+	{ 0x18a00, 48, RI_E3B0_ONLINE },
+	{ 0x20000, 24, RI_ALL_ONLINE },
+	{ 0x20060, 8, RI_ALL_ONLINE },
+	{ 0x20080, 94, RI_ALL_ONLINE },
+	{ 0x201f8, 1, RI_E1E1H_ONLINE },
+	{ 0x201fc, 1, RI_ALL_ONLINE },
+	{ 0x20200, 1, RI_E1E1H_ONLINE },
+	{ 0x20204, 1, RI_ALL_ONLINE },
+	{ 0x20208, 1, RI_E1E1H_ONLINE },
+	{ 0x2020c, 39, RI_ALL_ONLINE },
+	{ 0x202c8, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x202d8, 4, RI_E2E3E3B0_ONLINE },
+	{ 0x202f0, 1, RI_E3B0_ONLINE },
+	{ 0x20400, 2, RI_ALL_ONLINE },
+	{ 0x2040c, 8, RI_ALL_ONLINE },
+	{ 0x2042c, 18, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x20480, 1, RI_ALL_ONLINE },
+	{ 0x20500, 1, RI_ALL_ONLINE },
+	{ 0x20600, 1, RI_ALL_ONLINE },
+	{ 0x28000, 1, RI_ALL_ONLINE },
+	{ 0x28004, 8191, RI_ALL_OFFLINE },
+	{ 0x30000, 1, RI_ALL_ONLINE },
+	{ 0x30004, 16383, RI_ALL_OFFLINE },
+	{ 0x40000, 98, RI_ALL_ONLINE },
+	{ 0x401a8, 8, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x401c8, 1, RI_E1H_ONLINE },
+	{ 0x401cc, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x401d4, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x40200, 4, RI_ALL_ONLINE },
+	{ 0x40220, 6, RI_E2E3E3B0_ONLINE },
+	{ 0x40238, 8, RI_E2E3_ONLINE },
+	{ 0x40258, 4, RI_E2E3E3B0_ONLINE },
+	{ 0x40268, 2, RI_E3E3B0_ONLINE },
+	{ 0x40270, 17, RI_E3B0_ONLINE },
+	{ 0x40400, 43, RI_ALL_ONLINE },
+	{ 0x404cc, 3, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x404e0, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x40500, 2, RI_ALL_ONLINE },
+	{ 0x40510, 2, RI_ALL_ONLINE },
+	{ 0x40520, 2, RI_ALL_ONLINE },
+	{ 0x40530, 2, RI_ALL_ONLINE },
+	{ 0x40540, 2, RI_ALL_ONLINE },
+	{ 0x40550, 10, RI_E2E3E3B0_ONLINE },
+	{ 0x40610, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x42000, 164, RI_ALL_ONLINE },
+	{ 0x422c0, 4, RI_E2E3E3B0_ONLINE },
+	{ 0x422d4, 5, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x422e8, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x42400, 49, RI_ALL_ONLINE },
+	{ 0x424c8, 38, RI_ALL_ONLINE },
+	{ 0x42568, 2, RI_ALL_ONLINE },
+	{ 0x42640, 5, RI_E2E3E3B0_ONLINE },
+	{ 0x42800, 1, RI_ALL_ONLINE },
+	{ 0x50000, 1, RI_ALL_ONLINE },
+	{ 0x50004, 19, RI_ALL_ONLINE },
+	{ 0x50050, 8, RI_ALL_ONLINE },
+	{ 0x50070, 88, RI_ALL_ONLINE },
+	{ 0x501f0, 4, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x50200, 2, RI_ALL_ONLINE },
+	{ 0x5020c, 7, RI_ALL_ONLINE },
+	{ 0x50228, 6, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x50240, 1, RI_ALL_ONLINE },
+	{ 0x50280, 1, RI_ALL_ONLINE },
+	{ 0x50300, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x5030c, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x50318, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x5031c, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x50320, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x50330, 1, RI_E3B0_ONLINE },
+	{ 0x52000, 1, RI_ALL_ONLINE },
+	{ 0x54000, 1, RI_ALL_ONLINE },
+	{ 0x54004, 3327, RI_ALL_OFFLINE },
+	{ 0x58000, 1, RI_ALL_ONLINE },
+	{ 0x58004, 8191, RI_E1E1H_OFFLINE },
+	{ 0x60000, 26, RI_ALL_ONLINE },
+	{ 0x60068, 8, RI_E1E1H_ONLINE },
+	{ 0x60088, 12, RI_ALL_ONLINE },
+	{ 0x600b8, 9, RI_E1E1H_ONLINE },
+	{ 0x600dc, 1, RI_ALL_ONLINE },
+	{ 0x600e0, 5, RI_E1E1H_ONLINE },
+	{ 0x600f4, 1, RI_E1E1HE2_ONLINE },
+	{ 0x600f8, 1, RI_E1E1H_ONLINE },
+	{ 0x600fc, 8, RI_ALL_ONLINE },
+	{ 0x6013c, 24, RI_E1H_ONLINE },
+	{ 0x6019c, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x601ac, 18, RI_E2E3E3B0_ONLINE },
+	{ 0x60200, 1, RI_ALL_ONLINE },
+	{ 0x60204, 2, RI_ALL_OFFLINE },
+	{ 0x60210, 13, RI_E2E3E3B0_ONLINE },
+	{ 0x60244, 16, RI_E3B0_ONLINE },
+	{ 0x61000, 1, RI_ALL_ONLINE },
+	{ 0x61004, 511, RI_ALL_OFFLINE },
+	{ 0x61800, 512, RI_E3E3B0_OFFLINE },
+	{ 0x70000, 8, RI_ALL_ONLINE },
+	{ 0x70020, 8184, RI_ALL_OFFLINE },
+	{ 0x78000, 8192, RI_E3E3B0_OFFLINE },
+	{ 0x85000, 3, RI_ALL_ONLINE },
+	{ 0x8501c, 7, RI_ALL_ONLINE },
+	{ 0x85048, 1, RI_ALL_ONLINE },
+	{ 0x85200, 32, RI_ALL_ONLINE },
+	{ 0xb0000, 16384, RI_E1H_ONLINE },
+	{ 0xc1000, 7, RI_ALL_ONLINE },
+	{ 0xc103c, 2, RI_E2E3E3B0_ONLINE },
+	{ 0xc1800, 2, RI_ALL_ONLINE },
+	{ 0xc2000, 164, RI_ALL_ONLINE },
+	{ 0xc22c0, 5, RI_E2E3E3B0_ONLINE },
+	{ 0xc22d8, 4, RI_E2E3E3B0_ONLINE },
+	{ 0xc2400, 49, RI_ALL_ONLINE },
+	{ 0xc24c8, 38, RI_ALL_ONLINE },
+	{ 0xc2568, 2, RI_ALL_ONLINE },
+	{ 0xc2600, 1, RI_ALL_ONLINE },
+	{ 0xc4000, 165, RI_ALL_ONLINE },
+	{ 0xc42d8, 2, RI_E2E3E3B0_ONLINE },
+	{ 0xc42e0, 7, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xc42fc, 1, RI_E2E3E3B0_ONLINE },
+	{ 0xc4400, 51, RI_ALL_ONLINE },
+	{ 0xc44d0, 38, RI_ALL_ONLINE },
+	{ 0xc4570, 2, RI_ALL_ONLINE },
+	{ 0xc4578, 5, RI_E2E3E3B0_ONLINE },
+	{ 0xc4600, 1, RI_ALL_ONLINE },
+	{ 0xd0000, 19, RI_ALL_ONLINE },
+	{ 0xd004c, 8, RI_ALL_ONLINE },
+	{ 0xd006c, 91, RI_ALL_ONLINE },
+	{ 0xd01fc, 1, RI_E2E3E3B0_ONLINE },
+	{ 0xd0200, 2, RI_ALL_ONLINE },
+	{ 0xd020c, 7, RI_ALL_ONLINE },
+	{ 0xd0228, 18, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xd0280, 1, RI_ALL_ONLINE },
+	{ 0xd0300, 1, RI_ALL_ONLINE },
+	{ 0xd0400, 1, RI_ALL_ONLINE },
+	{ 0xd0818, 1, RI_E3B0_ONLINE },
+	{ 0xd4000, 1, RI_ALL_ONLINE },
+	{ 0xd4004, 2559, RI_ALL_OFFLINE },
+	{ 0xd8000, 1, RI_ALL_ONLINE },
+	{ 0xd8004, 8191, RI_ALL_OFFLINE },
+	{ 0xe0000, 21, RI_ALL_ONLINE },
+	{ 0xe0054, 8, RI_ALL_ONLINE },
+	{ 0xe0074, 49, RI_ALL_ONLINE },
+	{ 0xe0138, 1, RI_E1E1H_ONLINE },
+	{ 0xe013c, 35, RI_ALL_ONLINE },
+	{ 0xe01f4, 1, RI_E2_ONLINE },
+	{ 0xe01f8, 1, RI_E2E3E3B0_ONLINE },
+	{ 0xe0200, 2, RI_ALL_ONLINE },
+	{ 0xe020c, 8, RI_ALL_ONLINE },
+	{ 0xe022c, 18, RI_E1HE2E3E3B0_ONLINE },
+	{ 0xe0280, 1, RI_ALL_ONLINE },
+	{ 0xe0300, 1, RI_ALL_ONLINE },
+	{ 0xe0400, 1, RI_E3B0_ONLINE },
+	{ 0xe1000, 1, RI_ALL_ONLINE },
+	{ 0xe2000, 1, RI_ALL_ONLINE },
+	{ 0xe2004, 2047, RI_ALL_OFFLINE },
+	{ 0xf0000, 1, RI_ALL_ONLINE },
+	{ 0xf0004, 16383, RI_ALL_OFFLINE },
+	{ 0x101000, 12, RI_ALL_ONLINE },
+	{ 0x101050, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x101054, 3, RI_E2E3E3B0_ONLINE },
+	{ 0x101100, 1, RI_ALL_ONLINE },
+	{ 0x101800, 8, RI_ALL_ONLINE },
+	{ 0x102000, 18, RI_ALL_ONLINE },
+	{ 0x102068, 6, RI_E2E3E3B0_ONLINE },
+	{ 0x102080, 17, RI_ALL_ONLINE },
+	{ 0x1020c8, 8, RI_E1H_ONLINE },
+	{ 0x1020e8, 9, RI_E2E3E3B0_ONLINE },
+	{ 0x102400, 1, RI_ALL_ONLINE },
+	{ 0x103000, 26, RI_ALL_ONLINE },
+	{ 0x103098, 5, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1030ac, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x1030b4, 1, RI_E2_ONLINE },
+	{ 0x1030b8, 7, RI_E2E3E3B0_ONLINE },
+	{ 0x1030d8, 8, RI_E2E3E3B0_ONLINE },
+	{ 0x103400, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x103404, 135, RI_E2E3E3B0_OFFLINE },
+	{ 0x103800, 8, RI_ALL_ONLINE },
+	{ 0x104000, 63, RI_ALL_ONLINE },
+	{ 0x10411c, 16, RI_E2E3E3B0_ONLINE },
+	{ 0x104200, 17, RI_ALL_ONLINE },
+	{ 0x104400, 64, RI_ALL_ONLINE },
+	{ 0x104500, 192, RI_ALL_OFFLINE },
+	{ 0x104800, 64, RI_ALL_ONLINE },
+	{ 0x104900, 192, RI_ALL_OFFLINE },
+	{ 0x105000, 256, RI_ALL_ONLINE },
+	{ 0x105400, 768, RI_ALL_OFFLINE },
+	{ 0x107000, 7, RI_E2E3E3B0_ONLINE },
+	{ 0x10701c, 1, RI_E3E3B0_ONLINE },
+	{ 0x108000, 33, RI_E1E1H_ONLINE },
+	{ 0x1080ac, 5, RI_E1H_ONLINE },
+	{ 0x108100, 5, RI_E1E1H_ONLINE },
+	{ 0x108120, 5, RI_E1E1H_ONLINE },
+	{ 0x108200, 74, RI_E1E1H_ONLINE },
+	{ 0x108400, 74, RI_E1E1H_ONLINE },
+	{ 0x108800, 152, RI_E1E1H_ONLINE },
+	{ 0x110000, 111, RI_E2E3E3B0_ONLINE },
+	{ 0x1101dc, 1, RI_E3E3B0_ONLINE },
+	{ 0x110200, 4, RI_E2E3E3B0_ONLINE },
+	{ 0x120000, 2, RI_ALL_ONLINE },
+	{ 0x120008, 4, RI_ALL_ONLINE },
+	{ 0x120018, 3, RI_ALL_ONLINE },
+	{ 0x120024, 4, RI_ALL_ONLINE },
+	{ 0x120034, 3, RI_ALL_ONLINE },
+	{ 0x120040, 4, RI_ALL_ONLINE },
+	{ 0x120050, 3, RI_ALL_ONLINE },
+	{ 0x12005c, 4, RI_ALL_ONLINE },
+	{ 0x12006c, 3, RI_ALL_ONLINE },
+	{ 0x120078, 4, RI_ALL_ONLINE },
+	{ 0x120088, 3, RI_ALL_ONLINE },
+	{ 0x120094, 4, RI_ALL_ONLINE },
+	{ 0x1200a4, 3, RI_ALL_ONLINE },
+	{ 0x1200b0, 4, RI_ALL_ONLINE },
+	{ 0x1200c0, 3, RI_ALL_ONLINE },
+	{ 0x1200cc, 4, RI_ALL_ONLINE },
+	{ 0x1200dc, 3, RI_ALL_ONLINE },
+	{ 0x1200e8, 4, RI_ALL_ONLINE },
+	{ 0x1200f8, 3, RI_ALL_ONLINE },
+	{ 0x120104, 4, RI_ALL_ONLINE },
+	{ 0x120114, 1, RI_ALL_ONLINE },
+	{ 0x120118, 22, RI_ALL_ONLINE },
+	{ 0x120170, 2, RI_E1E1H_ONLINE },
+	{ 0x120178, 243, RI_ALL_ONLINE },
+	{ 0x120544, 4, RI_E1E1H_ONLINE },
+	{ 0x120554, 6, RI_ALL_ONLINE },
+	{ 0x12059c, 6, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1205b4, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1205b8, 15, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1205f4, 1, RI_E1HE2_ONLINE },
+	{ 0x1205f8, 4, RI_E2E3E3B0_ONLINE },
+	{ 0x120618, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x12061c, 20, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x12066c, 11, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x120698, 3, RI_E2E3E3B0_ONLINE },
+	{ 0x1206a4, 1, RI_E2_ONLINE },
+	{ 0x1206a8, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x1206b0, 75, RI_E2E3E3B0_ONLINE },
+	{ 0x1207dc, 1, RI_E2_ONLINE },
+	{ 0x1207fc, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x12080c, 65, RI_ALL_ONLINE },
+	{ 0x120910, 7, RI_E2E3E3B0_ONLINE },
+	{ 0x120930, 9, RI_E2E3E3B0_ONLINE },
+	{ 0x12095c, 37, RI_E3E3B0_ONLINE },
+	{ 0x120a00, 2, RI_E1E1HE2_ONLINE },
+	{ 0x120b00, 1, RI_E3E3B0_ONLINE },
+	{ 0x122000, 2, RI_ALL_ONLINE },
+	{ 0x122008, 2046, RI_E1_OFFLINE },
+	{ 0x128000, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x128008, 6142, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x130000, 35, RI_E2E3E3B0_ONLINE },
+	{ 0x130100, 29, RI_E2E3E3B0_ONLINE },
+	{ 0x130180, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x130200, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x130280, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x130300, 5, RI_E2E3E3B0_ONLINE },
+	{ 0x130380, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x130400, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x130480, 5, RI_E2E3E3B0_ONLINE },
+	{ 0x130800, 72, RI_E2E3E3B0_ONLINE },
+	{ 0x131000, 136, RI_E2E3E3B0_ONLINE },
+	{ 0x132000, 148, RI_E2E3E3B0_ONLINE },
+	{ 0x134000, 544, RI_E2E3E3B0_ONLINE },
+	{ 0x140000, 1, RI_ALL_ONLINE },
+	{ 0x140004, 9, RI_E1E1HE2E3_ONLINE },
+	{ 0x140028, 8, RI_ALL_ONLINE },
+	{ 0x140048, 10, RI_E1E1HE2E3_ONLINE },
+	{ 0x140070, 1, RI_ALL_ONLINE },
+	{ 0x140074, 10, RI_E1E1HE2E3_ONLINE },
+	{ 0x14009c, 1, RI_ALL_ONLINE },
+	{ 0x1400a0, 5, RI_E1E1HE2E3_ONLINE },
+	{ 0x1400b4, 7, RI_ALL_ONLINE },
+	{ 0x1400d0, 10, RI_E1E1HE2E3_ONLINE },
+	{ 0x1400f8, 2, RI_ALL_ONLINE },
+	{ 0x140100, 5, RI_E1E1H_ONLINE },
+	{ 0x140114, 5, RI_E1E1HE2E3_ONLINE },
+	{ 0x140128, 7, RI_ALL_ONLINE },
+	{ 0x140144, 9, RI_E1E1HE2E3_ONLINE },
+	{ 0x140168, 8, RI_ALL_ONLINE },
+	{ 0x140188, 3, RI_E1E1HE2E3_ONLINE },
+	{ 0x140194, 13, RI_ALL_ONLINE },
+	{ 0x140200, 6, RI_E1E1HE2E3_ONLINE },
+	{ 0x140220, 4, RI_E2E3_ONLINE },
+	{ 0x140240, 4, RI_E2E3_ONLINE },
+	{ 0x140260, 4, RI_E2E3_ONLINE },
+	{ 0x140280, 4, RI_E2E3_ONLINE },
+	{ 0x1402a0, 4, RI_E2E3_ONLINE },
+	{ 0x1402c0, 4, RI_E2E3_ONLINE },
+	{ 0x1402e0, 2, RI_E2E3_ONLINE },
+	{ 0x1402e8, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x1402f0, 9, RI_E2E3_ONLINE },
+	{ 0x140314, 44, RI_E3B0_ONLINE },
+	{ 0x1403d0, 70, RI_E3B0_ONLINE },
+	{ 0x144000, 4, RI_E1E1H_ONLINE },
+	{ 0x148000, 4, RI_E1E1H_ONLINE },
+	{ 0x14c000, 4, RI_E1E1H_ONLINE },
+	{ 0x150000, 4, RI_E1E1H_ONLINE },
+	{ 0x154000, 4, RI_E1E1H_ONLINE },
+	{ 0x158000, 4, RI_E1E1H_ONLINE },
+	{ 0x15c000, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x15c008, 5, RI_E1H_ONLINE },
+	{ 0x15c020, 8, RI_E2E3E3B0_ONLINE },
+	{ 0x15c040, 1, RI_E2E3_ONLINE },
+	{ 0x15c044, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x15c04c, 8, RI_E2E3_ONLINE },
+	{ 0x15c06c, 8, RI_E2E3E3B0_ONLINE },
+	{ 0x15c090, 13, RI_E2E3E3B0_ONLINE },
+	{ 0x15c0c8, 24, RI_E2E3E3B0_ONLINE },
+	{ 0x15c128, 2, RI_E2E3_ONLINE },
+	{ 0x15c130, 8, RI_E2E3E3B0_ONLINE },
+	{ 0x15c150, 2, RI_E3E3B0_ONLINE },
+	{ 0x15c158, 2, RI_E3_ONLINE },
+	{ 0x15c160, 149, RI_E3B0_ONLINE },
+	{ 0x161000, 7, RI_ALL_ONLINE },
+	{ 0x16103c, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x161800, 2, RI_ALL_ONLINE },
+	{ 0x162000, 54, RI_E3E3B0_ONLINE },
+	{ 0x162200, 60, RI_E3E3B0_ONLINE },
+	{ 0x162400, 54, RI_E3E3B0_ONLINE },
+	{ 0x162600, 60, RI_E3E3B0_ONLINE },
+	{ 0x162800, 54, RI_E3E3B0_ONLINE },
+	{ 0x162a00, 60, RI_E3E3B0_ONLINE },
+	{ 0x162c00, 54, RI_E3E3B0_ONLINE },
+	{ 0x162e00, 60, RI_E3E3B0_ONLINE },
+	{ 0x164000, 60, RI_ALL_ONLINE },
+	{ 0x164110, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x164118, 15, RI_E2E3E3B0_ONLINE },
+	{ 0x164200, 1, RI_ALL_ONLINE },
+	{ 0x164208, 1, RI_ALL_ONLINE },
+	{ 0x164210, 1, RI_ALL_ONLINE },
+	{ 0x164218, 1, RI_ALL_ONLINE },
+	{ 0x164220, 1, RI_ALL_ONLINE },
+	{ 0x164228, 1, RI_ALL_ONLINE },
+	{ 0x164230, 1, RI_ALL_ONLINE },
+	{ 0x164238, 1, RI_ALL_ONLINE },
+	{ 0x164240, 1, RI_ALL_ONLINE },
+	{ 0x164248, 1, RI_ALL_ONLINE },
+	{ 0x164250, 1, RI_ALL_ONLINE },
+	{ 0x164258, 1, RI_ALL_ONLINE },
+	{ 0x164260, 1, RI_ALL_ONLINE },
+	{ 0x164270, 2, RI_ALL_ONLINE },
+	{ 0x164280, 2, RI_ALL_ONLINE },
+	{ 0x164800, 2, RI_ALL_ONLINE },
+	{ 0x165000, 2, RI_ALL_ONLINE },
+	{ 0x166000, 164, RI_ALL_ONLINE },
+	{ 0x1662cc, 7, RI_E2E3E3B0_ONLINE },
+	{ 0x166400, 49, RI_ALL_ONLINE },
+	{ 0x1664c8, 38, RI_ALL_ONLINE },
+	{ 0x166568, 2, RI_ALL_ONLINE },
+	{ 0x166570, 5, RI_E2E3E3B0_ONLINE },
+	{ 0x166800, 1, RI_ALL_ONLINE },
+	{ 0x168000, 137, RI_ALL_ONLINE },
+	{ 0x168224, 2, RI_E1E1H_ONLINE },
+	{ 0x16822c, 29, RI_ALL_ONLINE },
+	{ 0x1682a0, 12, RI_E1E1H_ONLINE },
+	{ 0x1682d0, 12, RI_ALL_ONLINE },
+	{ 0x168300, 2, RI_E1E1H_ONLINE },
+	{ 0x168308, 68, RI_ALL_ONLINE },
+	{ 0x168418, 2, RI_E1E1H_ONLINE },
+	{ 0x168420, 6, RI_ALL_ONLINE },
+	{ 0x168800, 19, RI_ALL_ONLINE },
+	{ 0x168900, 1, RI_ALL_ONLINE },
+	{ 0x168a00, 128, RI_ALL_ONLINE },
+	{ 0x16a000, 1, RI_ALL_ONLINE },
+	{ 0x16a004, 1535, RI_ALL_OFFLINE },
+	{ 0x16c000, 1, RI_ALL_ONLINE },
+	{ 0x16c004, 1535, RI_ALL_OFFLINE },
+	{ 0x16e000, 16, RI_E1H_ONLINE },
+	{ 0x16e040, 8, RI_E2E3E3B0_ONLINE },
+	{ 0x16e100, 1, RI_E1H_ONLINE },
+	{ 0x16e200, 2, RI_E1H_ONLINE },
+	{ 0x16e400, 161, RI_E1H_ONLINE },
+	{ 0x16e684, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x16e68c, 12, RI_E1H_ONLINE },
+	{ 0x16e6bc, 4, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x16e6cc, 4, RI_E1H_ONLINE },
+	{ 0x16e6e0, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x16e6e8, 5, RI_E2E3_ONLINE },
+	{ 0x16e6fc, 5, RI_E2E3E3B0_ONLINE },
+	{ 0x16e768, 17, RI_E2E3E3B0_ONLINE },
+	{ 0x16e7ac, 12, RI_E3B0_ONLINE },
+	{ 0x170000, 24, RI_ALL_ONLINE },
+	{ 0x170060, 4, RI_E1E1H_ONLINE },
+	{ 0x170070, 65, RI_ALL_ONLINE },
+	{ 0x170194, 11, RI_E2E3E3B0_ONLINE },
+	{ 0x1701c4, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x1701cc, 7, RI_E2E3E3B0_ONLINE },
+	{ 0x1701e8, 1, RI_E3E3B0_ONLINE },
+	{ 0x1701ec, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x1701f4, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x170200, 4, RI_ALL_ONLINE },
+	{ 0x170214, 1, RI_ALL_ONLINE },
+	{ 0x170218, 77, RI_E2E3E3B0_ONLINE },
+	{ 0x170400, 64, RI_E2E3E3B0_ONLINE },
+	{ 0x178000, 1, RI_ALL_ONLINE },
+	{ 0x180000, 61, RI_ALL_ONLINE },
+	{ 0x18013c, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x180200, 58, RI_ALL_ONLINE },
+	{ 0x180340, 4, RI_ALL_ONLINE },
+	{ 0x180380, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x180388, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x180390, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x180398, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x1803a0, 5, RI_E2E3E3B0_ONLINE },
+	{ 0x1803b4, 2, RI_E3E3B0_ONLINE },
+	{ 0x180400, 1, RI_ALL_ONLINE },
+	{ 0x180404, 255, RI_E1E1H_OFFLINE },
+	{ 0x181000, 4, RI_ALL_ONLINE },
+	{ 0x181010, 1020, RI_ALL_OFFLINE },
+	{ 0x182000, 4, RI_E3E3B0_ONLINE },
+	{ 0x1a0000, 1, RI_ALL_ONLINE },
+	{ 0x1a0004, 5631, RI_ALL_OFFLINE },
+	{ 0x1a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x1a8000, 1, RI_ALL_ONLINE },
+	{ 0x1a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x1b0000, 1, RI_ALL_ONLINE },
+	{ 0x1b0004, 15, RI_E1H_OFFLINE },
+	{ 0x1b0040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b0044, 239, RI_E1H_OFFLINE },
+	{ 0x1b0400, 1, RI_ALL_ONLINE },
+	{ 0x1b0404, 255, RI_E1H_OFFLINE },
+	{ 0x1b0800, 1, RI_ALL_ONLINE },
+	{ 0x1b0840, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b0c00, 1, RI_ALL_ONLINE },
+	{ 0x1b1000, 1, RI_ALL_ONLINE },
+	{ 0x1b1040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b1400, 1, RI_ALL_ONLINE },
+	{ 0x1b1440, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b1480, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b1800, 128, RI_ALL_OFFLINE },
+	{ 0x1b1c00, 128, RI_ALL_OFFLINE },
+	{ 0x1b2000, 1, RI_ALL_ONLINE },
+	{ 0x1b2400, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b2404, 5631, RI_E2E3E3B0_OFFLINE },
+	{ 0x1b8000, 1, RI_ALL_ONLINE },
+	{ 0x1b8040, 1, RI_ALL_ONLINE },
+	{ 0x1b8080, 1, RI_ALL_ONLINE },
+	{ 0x1b80c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8100, 1, RI_ALL_ONLINE },
+	{ 0x1b8140, 1, RI_ALL_ONLINE },
+	{ 0x1b8180, 1, RI_ALL_ONLINE },
+	{ 0x1b81c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8200, 1, RI_ALL_ONLINE },
+	{ 0x1b8240, 1, RI_ALL_ONLINE },
+	{ 0x1b8280, 1, RI_ALL_ONLINE },
+	{ 0x1b82c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8300, 1, RI_ALL_ONLINE },
+	{ 0x1b8340, 1, RI_ALL_ONLINE },
+	{ 0x1b8380, 1, RI_ALL_ONLINE },
+	{ 0x1b83c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8400, 1, RI_ALL_ONLINE },
+	{ 0x1b8440, 1, RI_ALL_ONLINE },
+	{ 0x1b8480, 1, RI_ALL_ONLINE },
+	{ 0x1b84c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8500, 1, RI_ALL_ONLINE },
+	{ 0x1b8540, 1, RI_ALL_ONLINE },
+	{ 0x1b8580, 1, RI_ALL_ONLINE },
+	{ 0x1b85c0, 19, RI_E2E3E3B0_ONLINE },
+	{ 0x1b8800, 1, RI_ALL_ONLINE },
+	{ 0x1b8840, 1, RI_ALL_ONLINE },
+	{ 0x1b8880, 1, RI_ALL_ONLINE },
+	{ 0x1b88c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8900, 1, RI_ALL_ONLINE },
+	{ 0x1b8940, 1, RI_ALL_ONLINE },
+	{ 0x1b8980, 1, RI_ALL_ONLINE },
+	{ 0x1b89c0, 1, RI_ALL_ONLINE },
+	{ 0x1b8a00, 1, RI_ALL_ONLINE },
+	{ 0x1b8a40, 1, RI_ALL_ONLINE },
+	{ 0x1b8a80, 1, RI_ALL_ONLINE },
+	{ 0x1b8ac0, 1, RI_ALL_ONLINE },
+	{ 0x1b8b00, 1, RI_ALL_ONLINE },
+	{ 0x1b8b40, 1, RI_ALL_ONLINE },
+	{ 0x1b8b80, 1, RI_ALL_ONLINE },
+	{ 0x1b8bc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8c00, 1, RI_ALL_ONLINE },
+	{ 0x1b8c40, 1, RI_ALL_ONLINE },
+	{ 0x1b8c80, 1, RI_ALL_ONLINE },
+	{ 0x1b8cc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8cc4, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x1b8d00, 1, RI_ALL_ONLINE },
+	{ 0x1b8d40, 1, RI_ALL_ONLINE },
+	{ 0x1b8d80, 1, RI_ALL_ONLINE },
+	{ 0x1b8dc0, 1, RI_ALL_ONLINE },
+	{ 0x1b8e00, 1, RI_ALL_ONLINE },
+	{ 0x1b8e40, 1, RI_ALL_ONLINE },
+	{ 0x1b8e80, 1, RI_ALL_ONLINE },
+	{ 0x1b8e84, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x1b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x1b8fc4, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x1b8fd0, 6, RI_E2E3E3B0_ONLINE },
+	{ 0x1b8fe8, 2, RI_E3E3B0_ONLINE },
+	{ 0x1b9000, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x1b9040, 3, RI_E2E3E3B0_ONLINE },
+	{ 0x1b905c, 1, RI_E3E3B0_ONLINE },
+	{ 0x1b9064, 1, RI_E3B0_ONLINE },
+	{ 0x1b9080, 10, RI_E3B0_ONLINE },
+	{ 0x1b9400, 14, RI_E2E3E3B0_ONLINE },
+	{ 0x1b943c, 19, RI_E2E3E3B0_ONLINE },
+	{ 0x1b9490, 10, RI_E2E3E3B0_ONLINE },
+	{ 0x1c0000, 2, RI_ALL_ONLINE },
+	{ 0x200000, 65, RI_ALL_ONLINE },
+	{ 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x200200, 58, RI_ALL_ONLINE },
+	{ 0x200340, 4, RI_ALL_ONLINE },
+	{ 0x200380, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x200388, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x200390, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x200398, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x2003a0, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x2003a8, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x200400, 1, RI_ALL_ONLINE },
+	{ 0x200404, 255, RI_E1E1H_OFFLINE },
+	{ 0x202000, 4, RI_ALL_ONLINE },
+	{ 0x202010, 2044, RI_ALL_OFFLINE },
+	{ 0x204000, 4, RI_E3E3B0_ONLINE },
+	{ 0x220000, 1, RI_ALL_ONLINE },
+	{ 0x220004, 5631, RI_ALL_OFFLINE },
+	{ 0x225800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x228000, 1, RI_ALL_ONLINE },
+	{ 0x228004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x230000, 1, RI_ALL_ONLINE },
+	{ 0x230004, 15, RI_E1H_OFFLINE },
+	{ 0x230040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x230044, 239, RI_E1H_OFFLINE },
+	{ 0x230400, 1, RI_ALL_ONLINE },
+	{ 0x230404, 255, RI_E1H_OFFLINE },
+	{ 0x230800, 1, RI_ALL_ONLINE },
+	{ 0x230840, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x230c00, 1, RI_ALL_ONLINE },
+	{ 0x231000, 1, RI_ALL_ONLINE },
+	{ 0x231040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x231400, 1, RI_ALL_ONLINE },
+	{ 0x231440, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x231480, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2314c0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x231800, 128, RI_ALL_OFFLINE },
+	{ 0x231c00, 128, RI_ALL_OFFLINE },
+	{ 0x232000, 1, RI_ALL_ONLINE },
+	{ 0x232400, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x232404, 5631, RI_E2E3E3B0_OFFLINE },
+	{ 0x238000, 1, RI_ALL_ONLINE },
+	{ 0x238040, 1, RI_ALL_ONLINE },
+	{ 0x238080, 1, RI_ALL_ONLINE },
+	{ 0x2380c0, 1, RI_ALL_ONLINE },
+	{ 0x238100, 1, RI_ALL_ONLINE },
+	{ 0x238140, 1, RI_ALL_ONLINE },
+	{ 0x238180, 1, RI_ALL_ONLINE },
+	{ 0x2381c0, 1, RI_ALL_ONLINE },
+	{ 0x238200, 1, RI_ALL_ONLINE },
+	{ 0x238240, 1, RI_ALL_ONLINE },
+	{ 0x238280, 1, RI_ALL_ONLINE },
+	{ 0x2382c0, 1, RI_ALL_ONLINE },
+	{ 0x238300, 1, RI_ALL_ONLINE },
+	{ 0x238340, 1, RI_ALL_ONLINE },
+	{ 0x238380, 1, RI_ALL_ONLINE },
+	{ 0x2383c0, 1, RI_ALL_ONLINE },
+	{ 0x238400, 1, RI_ALL_ONLINE },
+	{ 0x238440, 1, RI_ALL_ONLINE },
+	{ 0x238480, 1, RI_ALL_ONLINE },
+	{ 0x2384c0, 1, RI_ALL_ONLINE },
+	{ 0x238500, 1, RI_ALL_ONLINE },
+	{ 0x238540, 1, RI_ALL_ONLINE },
+	{ 0x238580, 1, RI_ALL_ONLINE },
+	{ 0x2385c0, 19, RI_E2E3E3B0_ONLINE },
+	{ 0x238800, 1, RI_ALL_ONLINE },
+	{ 0x238840, 1, RI_ALL_ONLINE },
+	{ 0x238880, 1, RI_ALL_ONLINE },
+	{ 0x2388c0, 1, RI_ALL_ONLINE },
+	{ 0x238900, 1, RI_ALL_ONLINE },
+	{ 0x238940, 1, RI_ALL_ONLINE },
+	{ 0x238980, 1, RI_ALL_ONLINE },
+	{ 0x2389c0, 1, RI_ALL_ONLINE },
+	{ 0x238a00, 1, RI_ALL_ONLINE },
+	{ 0x238a40, 1, RI_ALL_ONLINE },
+	{ 0x238a80, 1, RI_ALL_ONLINE },
+	{ 0x238ac0, 1, RI_ALL_ONLINE },
+	{ 0x238b00, 1, RI_ALL_ONLINE },
+	{ 0x238b40, 1, RI_ALL_ONLINE },
+	{ 0x238b80, 1, RI_ALL_ONLINE },
+	{ 0x238bc0, 1, RI_ALL_ONLINE },
+	{ 0x238c00, 1, RI_ALL_ONLINE },
+	{ 0x238c40, 1, RI_ALL_ONLINE },
+	{ 0x238c80, 1, RI_ALL_ONLINE },
+	{ 0x238cc0, 1, RI_ALL_ONLINE },
+	{ 0x238cc4, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x238d00, 1, RI_ALL_ONLINE },
+	{ 0x238d40, 1, RI_ALL_ONLINE },
+	{ 0x238d80, 1, RI_ALL_ONLINE },
+	{ 0x238dc0, 1, RI_ALL_ONLINE },
+	{ 0x238e00, 1, RI_ALL_ONLINE },
+	{ 0x238e40, 1, RI_ALL_ONLINE },
+	{ 0x238e80, 1, RI_ALL_ONLINE },
+	{ 0x238e84, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x238ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x238f00, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x238f40, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x238f80, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x238fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x238fc4, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x238fd0, 6, RI_E2E3E3B0_ONLINE },
+	{ 0x238fe8, 2, RI_E3E3B0_ONLINE },
+	{ 0x239000, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x239040, 3, RI_E2E3E3B0_ONLINE },
+	{ 0x23905c, 1, RI_E3E3B0_ONLINE },
+	{ 0x239064, 1, RI_E3B0_ONLINE },
+	{ 0x239080, 10, RI_E3B0_ONLINE },
+	{ 0x240000, 2, RI_ALL_ONLINE },
+	{ 0x280000, 65, RI_ALL_ONLINE },
+	{ 0x28014c, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x280200, 58, RI_ALL_ONLINE },
+	{ 0x280340, 4, RI_ALL_ONLINE },
+	{ 0x280380, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x280388, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x280390, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x280398, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x2803a0, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x2803a8, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x280400, 1, RI_ALL_ONLINE },
+	{ 0x280404, 255, RI_E1E1H_OFFLINE },
+	{ 0x282000, 4, RI_ALL_ONLINE },
+	{ 0x282010, 2044, RI_ALL_OFFLINE },
+	{ 0x284000, 4, RI_E3E3B0_ONLINE },
+	{ 0x2a0000, 1, RI_ALL_ONLINE },
+	{ 0x2a0004, 5631, RI_ALL_OFFLINE },
+	{ 0x2a5800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x2a8000, 1, RI_ALL_ONLINE },
+	{ 0x2a8004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x2b0000, 1, RI_ALL_ONLINE },
+	{ 0x2b0004, 15, RI_E1H_OFFLINE },
+	{ 0x2b0040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b0044, 239, RI_E1H_OFFLINE },
+	{ 0x2b0400, 1, RI_ALL_ONLINE },
+	{ 0x2b0404, 255, RI_E1H_OFFLINE },
+	{ 0x2b0800, 1, RI_ALL_ONLINE },
+	{ 0x2b0840, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b0c00, 1, RI_ALL_ONLINE },
+	{ 0x2b1000, 1, RI_ALL_ONLINE },
+	{ 0x2b1040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b1400, 1, RI_ALL_ONLINE },
+	{ 0x2b1440, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b1480, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b14c0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b1800, 128, RI_ALL_OFFLINE },
+	{ 0x2b1c00, 128, RI_ALL_OFFLINE },
+	{ 0x2b2000, 1, RI_ALL_ONLINE },
+	{ 0x2b2400, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b2404, 5631, RI_E2E3E3B0_OFFLINE },
+	{ 0x2b8000, 1, RI_ALL_ONLINE },
+	{ 0x2b8040, 1, RI_ALL_ONLINE },
+	{ 0x2b8080, 1, RI_ALL_ONLINE },
+	{ 0x2b80c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8100, 1, RI_ALL_ONLINE },
+	{ 0x2b8140, 1, RI_ALL_ONLINE },
+	{ 0x2b8180, 1, RI_ALL_ONLINE },
+	{ 0x2b81c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8200, 1, RI_ALL_ONLINE },
+	{ 0x2b8240, 1, RI_ALL_ONLINE },
+	{ 0x2b8280, 1, RI_ALL_ONLINE },
+	{ 0x2b82c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8300, 1, RI_ALL_ONLINE },
+	{ 0x2b8340, 1, RI_ALL_ONLINE },
+	{ 0x2b8380, 1, RI_ALL_ONLINE },
+	{ 0x2b83c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8400, 1, RI_ALL_ONLINE },
+	{ 0x2b8440, 1, RI_ALL_ONLINE },
+	{ 0x2b8480, 1, RI_ALL_ONLINE },
+	{ 0x2b84c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8500, 1, RI_ALL_ONLINE },
+	{ 0x2b8540, 1, RI_ALL_ONLINE },
+	{ 0x2b8580, 1, RI_ALL_ONLINE },
+	{ 0x2b85c0, 19, RI_E2E3E3B0_ONLINE },
+	{ 0x2b8800, 1, RI_ALL_ONLINE },
+	{ 0x2b8840, 1, RI_ALL_ONLINE },
+	{ 0x2b8880, 1, RI_ALL_ONLINE },
+	{ 0x2b88c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8900, 1, RI_ALL_ONLINE },
+	{ 0x2b8940, 1, RI_ALL_ONLINE },
+	{ 0x2b8980, 1, RI_ALL_ONLINE },
+	{ 0x2b89c0, 1, RI_ALL_ONLINE },
+	{ 0x2b8a00, 1, RI_ALL_ONLINE },
+	{ 0x2b8a40, 1, RI_ALL_ONLINE },
+	{ 0x2b8a80, 1, RI_ALL_ONLINE },
+	{ 0x2b8ac0, 1, RI_ALL_ONLINE },
+	{ 0x2b8b00, 1, RI_ALL_ONLINE },
+	{ 0x2b8b40, 1, RI_ALL_ONLINE },
+	{ 0x2b8b80, 1, RI_ALL_ONLINE },
+	{ 0x2b8bc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8c00, 1, RI_ALL_ONLINE },
+	{ 0x2b8c40, 1, RI_ALL_ONLINE },
+	{ 0x2b8c80, 1, RI_ALL_ONLINE },
+	{ 0x2b8cc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8cc4, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x2b8d00, 1, RI_ALL_ONLINE },
+	{ 0x2b8d40, 1, RI_ALL_ONLINE },
+	{ 0x2b8d80, 1, RI_ALL_ONLINE },
+	{ 0x2b8dc0, 1, RI_ALL_ONLINE },
+	{ 0x2b8e00, 1, RI_ALL_ONLINE },
+	{ 0x2b8e40, 1, RI_ALL_ONLINE },
+	{ 0x2b8e80, 1, RI_ALL_ONLINE },
+	{ 0x2b8e84, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x2b8ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b8f00, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b8f40, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b8f80, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b8fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x2b8fc4, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x2b8fd0, 6, RI_E2E3E3B0_ONLINE },
+	{ 0x2b8fe8, 2, RI_E3E3B0_ONLINE },
+	{ 0x2b9000, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x2b9040, 3, RI_E2E3E3B0_ONLINE },
+	{ 0x2b905c, 1, RI_E3E3B0_ONLINE },
+	{ 0x2b9064, 1, RI_E3B0_ONLINE },
+	{ 0x2b9080, 10, RI_E3B0_ONLINE },
+	{ 0x2b9400, 14, RI_E2E3E3B0_ONLINE },
+	{ 0x2b943c, 19, RI_E2E3E3B0_ONLINE },
+	{ 0x2b9490, 10, RI_E2E3E3B0_ONLINE },
+	{ 0x2c0000, 2, RI_ALL_ONLINE },
+	{ 0x300000, 65, RI_ALL_ONLINE },
+	{ 0x30014c, 2, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x300200, 58, RI_ALL_ONLINE },
+	{ 0x300340, 4, RI_ALL_ONLINE },
+	{ 0x300380, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x300388, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x300390, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x300398, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x3003a0, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x3003a8, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x300400, 1, RI_ALL_ONLINE },
+	{ 0x300404, 255, RI_E1E1H_OFFLINE },
+	{ 0x302000, 4, RI_ALL_ONLINE },
+	{ 0x302010, 2044, RI_ALL_OFFLINE },
+	{ 0x304000, 4, RI_E3E3B0_ONLINE },
+	{ 0x320000, 1, RI_ALL_ONLINE },
+	{ 0x320004, 5631, RI_ALL_OFFLINE },
+	{ 0x325800, 2560, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x328000, 1, RI_ALL_ONLINE },
+	{ 0x328004, 8191, RI_E1HE2E3E3B0_OFFLINE },
+	{ 0x330000, 1, RI_ALL_ONLINE },
+	{ 0x330004, 15, RI_E1H_OFFLINE },
+	{ 0x330040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x330044, 239, RI_E1H_OFFLINE },
+	{ 0x330400, 1, RI_ALL_ONLINE },
+	{ 0x330404, 255, RI_E1H_OFFLINE },
+	{ 0x330800, 1, RI_ALL_ONLINE },
+	{ 0x330840, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x330c00, 1, RI_ALL_ONLINE },
+	{ 0x331000, 1, RI_ALL_ONLINE },
+	{ 0x331040, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x331400, 1, RI_ALL_ONLINE },
+	{ 0x331440, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x331480, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x3314c0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x331800, 128, RI_ALL_OFFLINE },
+	{ 0x331c00, 128, RI_ALL_OFFLINE },
+	{ 0x332000, 1, RI_ALL_ONLINE },
+	{ 0x332400, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x332404, 5631, RI_E2E3E3B0_OFFLINE },
+	{ 0x338000, 1, RI_ALL_ONLINE },
+	{ 0x338040, 1, RI_ALL_ONLINE },
+	{ 0x338080, 1, RI_ALL_ONLINE },
+	{ 0x3380c0, 1, RI_ALL_ONLINE },
+	{ 0x338100, 1, RI_ALL_ONLINE },
+	{ 0x338140, 1, RI_ALL_ONLINE },
+	{ 0x338180, 1, RI_ALL_ONLINE },
+	{ 0x3381c0, 1, RI_ALL_ONLINE },
+	{ 0x338200, 1, RI_ALL_ONLINE },
+	{ 0x338240, 1, RI_ALL_ONLINE },
+	{ 0x338280, 1, RI_ALL_ONLINE },
+	{ 0x3382c0, 1, RI_ALL_ONLINE },
+	{ 0x338300, 1, RI_ALL_ONLINE },
+	{ 0x338340, 1, RI_ALL_ONLINE },
+	{ 0x338380, 1, RI_ALL_ONLINE },
+	{ 0x3383c0, 1, RI_ALL_ONLINE },
+	{ 0x338400, 1, RI_ALL_ONLINE },
+	{ 0x338440, 1, RI_ALL_ONLINE },
+	{ 0x338480, 1, RI_ALL_ONLINE },
+	{ 0x3384c0, 1, RI_ALL_ONLINE },
+	{ 0x338500, 1, RI_ALL_ONLINE },
+	{ 0x338540, 1, RI_ALL_ONLINE },
+	{ 0x338580, 1, RI_ALL_ONLINE },
+	{ 0x3385c0, 19, RI_E2E3E3B0_ONLINE },
+	{ 0x338800, 1, RI_ALL_ONLINE },
+	{ 0x338840, 1, RI_ALL_ONLINE },
+	{ 0x338880, 1, RI_ALL_ONLINE },
+	{ 0x3388c0, 1, RI_ALL_ONLINE },
+	{ 0x338900, 1, RI_ALL_ONLINE },
+	{ 0x338940, 1, RI_ALL_ONLINE },
+	{ 0x338980, 1, RI_ALL_ONLINE },
+	{ 0x3389c0, 1, RI_ALL_ONLINE },
+	{ 0x338a00, 1, RI_ALL_ONLINE },
+	{ 0x338a40, 1, RI_ALL_ONLINE },
+	{ 0x338a80, 1, RI_ALL_ONLINE },
+	{ 0x338ac0, 1, RI_ALL_ONLINE },
+	{ 0x338b00, 1, RI_ALL_ONLINE },
+	{ 0x338b40, 1, RI_ALL_ONLINE },
+	{ 0x338b80, 1, RI_ALL_ONLINE },
+	{ 0x338bc0, 1, RI_ALL_ONLINE },
+	{ 0x338c00, 1, RI_ALL_ONLINE },
+	{ 0x338c40, 1, RI_ALL_ONLINE },
+	{ 0x338c80, 1, RI_ALL_ONLINE },
+	{ 0x338cc0, 1, RI_ALL_ONLINE },
+	{ 0x338cc4, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x338d00, 1, RI_ALL_ONLINE },
+	{ 0x338d40, 1, RI_ALL_ONLINE },
+	{ 0x338d80, 1, RI_ALL_ONLINE },
+	{ 0x338dc0, 1, RI_ALL_ONLINE },
+	{ 0x338e00, 1, RI_ALL_ONLINE },
+	{ 0x338e40, 1, RI_ALL_ONLINE },
+	{ 0x338e80, 1, RI_ALL_ONLINE },
+	{ 0x338e84, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x338ec0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x338f00, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x338f40, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x338f80, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x338fc0, 1, RI_E1HE2E3E3B0_ONLINE },
+	{ 0x338fc4, 2, RI_E2E3E3B0_ONLINE },
+	{ 0x338fd0, 6, RI_E2E3E3B0_ONLINE },
+	{ 0x338fe8, 2, RI_E3E3B0_ONLINE },
+	{ 0x339000, 1, RI_E2E3E3B0_ONLINE },
+	{ 0x339040, 3, RI_E2E3E3B0_ONLINE },
+	{ 0x33905c, 1, RI_E3E3B0_ONLINE },
+	{ 0x339064, 1, RI_E3B0_ONLINE },
+	{ 0x339080, 10, RI_E3B0_ONLINE },
+	{ 0x340000, 2, RI_ALL_ONLINE },
+};
+#define REGS_COUNT			ARRAY_SIZE(reg_addrs)
+
+static const struct dump_sign dump_sign_all = { 0x4e23fde1, 0x70017, 0x3a };
+
+static const u32 page_vals_e2[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E2		ARRAY_SIZE(page_vals_e2)
+
+static const u32 page_write_regs_e2[] = { 328476 };
+#define PAGE_WRITE_REGS_E2		ARRAY_SIZE(page_write_regs_e2)
+
+static const struct reg_addr page_read_regs_e2[] = {
+	{ 0x58000, 4608, RI_E2_ONLINE } };
+#define PAGE_READ_REGS_E2		ARRAY_SIZE(page_read_regs_e2)
+
+static const u32 page_vals_e3[] = { 0, 128 };
+#define PAGE_MODE_VALUES_E3		ARRAY_SIZE(page_vals_e3)
+
+static const u32 page_write_regs_e3[] = { 328476 };
+#define PAGE_WRITE_REGS_E3		ARRAY_SIZE(page_write_regs_e3)
+
+static const struct reg_addr page_read_regs_e3[] = {
+	{ 0x58000, 4608, RI_E3E3B0_ONLINE } };
+#define PAGE_READ_REGS_E3		ARRAY_SIZE(page_read_regs_e3)
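+
+/* A page is selected by writing one of page_vals_e{2,3} into each register
+ * listed in page_write_regs_e{2,3} (328476 == 0x5031c); the ranges described
+ * by page_read_regs_e{2,3} are then read back once per page value. See
+ * bnx2x_read_pages_regs() in bnx2x_ethtool.c for the read loop.
+ */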
+
+#endif /* BNX2X_DUMP_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
new file mode 100644
index 0000000..2218630
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -0,0 +1,2355 @@
+/* bnx2x_ethtool.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/crc32.h>
+
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dump.h"
+#include "bnx2x_init.h"
+#include "bnx2x_sp.h"
+
+/* Note: in the format strings below %s is replaced by the queue name, which
+ * is either its index or 'fcoe' for the FCoE queue. Make sure the format
+ * string length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2.
+ */
+#define MAX_QUEUE_NAME_LEN	4
+static const struct {
+	long offset;
+	int size;
+	char string[ETH_GSTRING_LEN];
+} bnx2x_q_stats_arr[] = {
+/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
+	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
+						8, "[%s]: rx_ucast_packets" },
+	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
+						8, "[%s]: rx_mcast_packets" },
+	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
+						8, "[%s]: rx_bcast_packets" },
+	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%s]: rx_discards" },
+	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
+					 4, "[%s]: rx_phy_ip_err_discards"},
+	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
+					 4, "[%s]: rx_skb_alloc_discard" },
+	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
+
+	{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%s]: tx_bytes" },
+/* 10 */{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+						8, "[%s]: tx_ucast_packets" },
+	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+						8, "[%s]: tx_mcast_packets" },
+	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+						8, "[%s]: tx_bcast_packets" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregations_hi),
+						8, "[%s]: tpa_aggregations" },
+	{ Q_STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+					8, "[%s]: tpa_aggregated_frames"},
+	{ Q_STATS_OFFSET32(total_tpa_bytes_hi),	8, "[%s]: tpa_bytes"}
+};
+
+#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
+
+static const struct {
+	long offset;
+	int size;
+	u32 flags;
+#define STATS_FLAGS_PORT		1
+#define STATS_FLAGS_FUNC		2
+#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
+	char string[ETH_GSTRING_LEN];
+} bnx2x_stats_arr[] = {
+/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_bytes" },
+	{ STATS_OFFSET32(error_bytes_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
+	{ STATS_OFFSET32(total_unicast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
+	{ STATS_OFFSET32(total_multicast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
+	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
+				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
+	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
+				8, STATS_FLAGS_PORT, "rx_crc_errors" },
+	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
+				8, STATS_FLAGS_PORT, "rx_align_errors" },
+	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
+	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
+				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
+/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
+				8, STATS_FLAGS_PORT, "rx_fragments" },
+	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
+				8, STATS_FLAGS_PORT, "rx_jabbers" },
+	{ STATS_OFFSET32(no_buff_discard_hi),
+				8, STATS_FLAGS_BOTH, "rx_discards" },
+	{ STATS_OFFSET32(mac_filter_discard),
+				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
+	{ STATS_OFFSET32(mf_tag_discard),
+				4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+	{ STATS_OFFSET32(brb_drop_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_discard" },
+	{ STATS_OFFSET32(brb_truncate_hi),
+				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
+	{ STATS_OFFSET32(pause_frames_received_hi),
+				8, STATS_FLAGS_PORT, "rx_pause_frames" },
+	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
+				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
+	{ STATS_OFFSET32(nig_timer_max),
+			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
+/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
+				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
+	{ STATS_OFFSET32(rx_skb_alloc_failed),
+				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
+	{ STATS_OFFSET32(hw_csum_err),
+				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
+
+	{ STATS_OFFSET32(total_bytes_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_bytes" },
+	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
+				8, STATS_FLAGS_PORT, "tx_error_bytes" },
+	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
+	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
+	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
+				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
+	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
+				8, STATS_FLAGS_PORT, "tx_mac_errors" },
+	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
+				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
+/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
+				8, STATS_FLAGS_PORT, "tx_single_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
+				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
+				8, STATS_FLAGS_PORT, "tx_deferred" },
+	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
+	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_late_collisions" },
+	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
+				8, STATS_FLAGS_PORT, "tx_total_collisions" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
+				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
+			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
+			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
+	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
+			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
+/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
+			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
+	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
+			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
+	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
+			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
+	{ STATS_OFFSET32(pause_frames_sent_hi),
+				8, STATS_FLAGS_PORT, "tx_pause_frames" },
+	{ STATS_OFFSET32(total_tpa_aggregations_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregations" },
+	{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
+			8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
+	{ STATS_OFFSET32(total_tpa_bytes_hi),
+			8, STATS_FLAGS_FUNC, "tpa_bytes"}
+};
+
+#define BNX2X_NUM_STATS		ARRAY_SIZE(bnx2x_stats_arr)
+
+static int bnx2x_get_port_type(struct bnx2x *bp)
+{
+	int port_type;
+	u32 phy_idx = bnx2x_get_cur_phy_idx(bp);
+
+	switch (bp->link_params.phy[phy_idx].media_type) {
+	case ETH_PHY_SFP_FIBER:
+	case ETH_PHY_XFP_FIBER:
+	case ETH_PHY_KR:
+	case ETH_PHY_CX4:
+		port_type = PORT_FIBRE;
+		break;
+	case ETH_PHY_DA_TWINAX:
+		port_type = PORT_DA;
+		break;
+	case ETH_PHY_BASE_T:
+		port_type = PORT_TP;
+		break;
+	case ETH_PHY_NOT_PRESENT:
+		port_type = PORT_NONE;
+		break;
+	case ETH_PHY_UNSPECIFIED:
+	default:
+		port_type = PORT_OTHER;
+		break;
+	}
+	return port_type;
+}
+
+static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+	/* Dual Media boards present all available port types */
+	cmd->supported = bp->port.supported[cfg_idx] |
+		(bp->port.supported[cfg_idx ^ 1] &
+		 (SUPPORTED_TP | SUPPORTED_FIBRE));
+	cmd->advertising = bp->port.advertising[cfg_idx];
+
+	if ((bp->state == BNX2X_STATE_OPEN) &&
+	    !(bp->flags & MF_FUNC_DIS) &&
+	    (bp->link_vars.link_up)) {
+		ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+		cmd->duplex = bp->link_vars.duplex;
+	} else {
+		ethtool_cmd_speed_set(
+			cmd, bp->link_params.req_line_speed[cfg_idx]);
+		cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+	}
+
+	if (IS_MF(bp))
+		ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+
+	cmd->port = bnx2x_get_port_type(bp);
+
+	cmd->phy_address = bp->mdio.prtad;
+	cmd->transceiver = XCVR_INTERNAL;
+
+	if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
+		cmd->autoneg = AUTONEG_ENABLE;
+	else
+		cmd->autoneg = AUTONEG_DISABLE;
+
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+
+	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %u\n"
+	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising,
+	   ethtool_cmd_speed(cmd),
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	return 0;
+}
+
+static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+	u32 speed;
+
+	if (IS_MF_SD(bp))
+		return 0;
+
+	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+	   "  supported 0x%x  advertising 0x%x  speed %u\n"
+	   "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   cmd->cmd, cmd->supported, cmd->advertising,
+	   ethtool_cmd_speed(cmd),
+	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+	speed = ethtool_cmd_speed(cmd);
+
+	if (IS_MF_SI(bp)) {
+		u32 part;
+		u32 line_speed = bp->link_vars.line_speed;
+
+		/* use 10G if no link detected */
+		if (!line_speed)
+			line_speed = 10000;
+
+		if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+			BNX2X_DEV_INFO("To set speed BC %X or higher "
+				       "is required, please upgrade BC\n",
+				       REQ_BC_VER_4_SET_MF_BW);
+			return -EINVAL;
+		}
+
+		part = (speed * 100) / line_speed;
+
+		if (line_speed < speed || !part) {
+			BNX2X_DEV_INFO("Speed setting should be in a range "
+				       "from 1%% to 100%% "
+				       "of actual line speed\n");
+			return -EINVAL;
+		}
+
+		if (bp->state != BNX2X_STATE_OPEN)
+			/* store value for following "load" */
+			bp->pending_max = part;
+		else
+			bnx2x_update_max_mf_config(bp, part);
+
+		return 0;
+	}
+
+	cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	old_multi_phy_config = bp->link_params.multi_phy_config;
+	switch (cmd->port) {
+	case PORT_TP:
+		if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
+			break; /* no port change */
+
+		if (!(bp->port.supported[0] & SUPPORTED_TP ||
+		      bp->port.supported[1] & SUPPORTED_TP)) {
+			DP(NETIF_MSG_LINK, "Unsupported port type\n");
+			return -EINVAL;
+		}
+		bp->link_params.multi_phy_config &=
+			~PORT_HW_CFG_PHY_SELECTION_MASK;
+		if (bp->link_params.multi_phy_config &
+		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+			bp->link_params.multi_phy_config |=
+			PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+		else
+			bp->link_params.multi_phy_config |=
+			PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+		break;
+	case PORT_FIBRE:
+		if (bp->port.supported[cfg_idx] & SUPPORTED_FIBRE)
+			break; /* no port change */
+
+		if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
+		      bp->port.supported[1] & SUPPORTED_FIBRE)) {
+			DP(NETIF_MSG_LINK, "Unsupported port type\n");
+			return -EINVAL;
+		}
+		bp->link_params.multi_phy_config &=
+			~PORT_HW_CFG_PHY_SELECTION_MASK;
+		if (bp->link_params.multi_phy_config &
+		    PORT_HW_CFG_PHY_SWAPPED_ENABLED)
+			bp->link_params.multi_phy_config |=
+			PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+		else
+			bp->link_params.multi_phy_config |=
+			PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Unsupported port type\n");
+		return -EINVAL;
+	}
+	/* Save the new config in case the command completes successfully */
+	new_multi_phy_config = bp->link_params.multi_phy_config;
+	/* Get the new cfg_idx */
+	cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	/* Restore old config in case command failed */
+	bp->link_params.multi_phy_config = old_multi_phy_config;
+	DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+			return -EINVAL;
+		}
+
+		/* advertise the requested speed and duplex if supported */
+		cmd->advertising &= bp->port.supported[cfg_idx];
+
+		bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
+		bp->link_params.req_duplex[cfg_idx] = DUPLEX_FULL;
+		bp->port.advertising[cfg_idx] |= (ADVERTISED_Autoneg |
+					 cmd->advertising);
+
+	} else { /* forced speed */
+		/* advertise the requested speed and duplex if supported */
+		switch (speed) {
+		case SPEED_10:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->port.supported[cfg_idx] &
+				      SUPPORTED_10baseT_Full)) {
+					DP(NETIF_MSG_LINK,
+					   "10M full not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_10baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->port.supported[cfg_idx] &
+				      SUPPORTED_10baseT_Half)) {
+					DP(NETIF_MSG_LINK,
+					   "10M half not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_10baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_100:
+			if (cmd->duplex == DUPLEX_FULL) {
+				if (!(bp->port.supported[cfg_idx] &
+						SUPPORTED_100baseT_Full)) {
+					DP(NETIF_MSG_LINK,
+					   "100M full not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_100baseT_Full |
+					       ADVERTISED_TP);
+			} else {
+				if (!(bp->port.supported[cfg_idx] &
+						SUPPORTED_100baseT_Half)) {
+					DP(NETIF_MSG_LINK,
+					   "100M half not supported\n");
+					return -EINVAL;
+				}
+
+				advertising = (ADVERTISED_100baseT_Half |
+					       ADVERTISED_TP);
+			}
+			break;
+
+		case SPEED_1000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(NETIF_MSG_LINK, "1G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (!(bp->port.supported[cfg_idx] &
+			      SUPPORTED_1000baseT_Full)) {
+				DP(NETIF_MSG_LINK, "1G full not supported\n");
+				return -EINVAL;
+			}
+
+			advertising = (ADVERTISED_1000baseT_Full |
+				       ADVERTISED_TP);
+			break;
+
+		case SPEED_2500:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(NETIF_MSG_LINK,
+				   "2.5G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (!(bp->port.supported[cfg_idx]
+			      & SUPPORTED_2500baseX_Full)) {
+				DP(NETIF_MSG_LINK,
+				   "2.5G full not supported\n");
+				return -EINVAL;
+			}
+
+			advertising = (ADVERTISED_2500baseX_Full |
+				       ADVERTISED_TP);
+			break;
+
+		case SPEED_10000:
+			if (cmd->duplex != DUPLEX_FULL) {
+				DP(NETIF_MSG_LINK, "10G half not supported\n");
+				return -EINVAL;
+			}
+
+			if (!(bp->port.supported[cfg_idx]
+			      & SUPPORTED_10000baseT_Full)) {
+				DP(NETIF_MSG_LINK, "10G full not supported\n");
+				return -EINVAL;
+			}
+
+			advertising = (ADVERTISED_10000baseT_Full |
+				       ADVERTISED_FIBRE);
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
+			return -EINVAL;
+		}
+
+		bp->link_params.req_line_speed[cfg_idx] = speed;
+		bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+		bp->port.advertising[cfg_idx] = advertising;
+	}
+
+	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
+	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
+	   bp->link_params.req_line_speed[cfg_idx],
+	   bp->link_params.req_duplex[cfg_idx],
+	   bp->port.advertising[cfg_idx]);
+
+	/* Set new config */
+	bp->link_params.multi_phy_config = new_multi_phy_config;
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
+#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
+#define IS_E2_ONLINE(info)	(((info) & RI_E2_ONLINE) == RI_E2_ONLINE)
+#define IS_E3_ONLINE(info)	(((info) & RI_E3_ONLINE) == RI_E3_ONLINE)
+#define IS_E3B0_ONLINE(info)	(((info) & RI_E3B0_ONLINE) == RI_E3B0_ONLINE)
+
+static inline bool bnx2x_is_reg_online(struct bnx2x *bp,
+				       const struct reg_addr *reg_info)
+{
+	if (CHIP_IS_E1(bp))
+		return IS_E1_ONLINE(reg_info->info);
+	else if (CHIP_IS_E1H(bp))
+		return IS_E1H_ONLINE(reg_info->info);
+	else if (CHIP_IS_E2(bp))
+		return IS_E2_ONLINE(reg_info->info);
+	else if (CHIP_IS_E3A0(bp))
+		return IS_E3_ONLINE(reg_info->info);
+	else if (CHIP_IS_E3B0(bp))
+		return IS_E3B0_ONLINE(reg_info->info);
+	else
+		return false;
+}
+
+/******* Paged registers info selectors ********/
+static inline const u32 *__bnx2x_get_page_addr_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_vals_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_vals_e3;
+	else
+		return NULL;
+}
+
+static inline u32 __bnx2x_get_page_reg_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_MODE_VALUES_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_MODE_VALUES_E3;
+	else
+		return 0;
+}
+
+static inline const u32 *__bnx2x_get_page_write_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_write_regs_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_write_regs_e3;
+	else
+		return NULL;
+}
+
+static inline u32 __bnx2x_get_page_write_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_WRITE_REGS_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_WRITE_REGS_E3;
+	else
+		return 0;
+}
+
+static inline const struct reg_addr *__bnx2x_get_page_read_ar(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return page_read_regs_e2;
+	else if (CHIP_IS_E3(bp))
+		return page_read_regs_e3;
+	else
+		return NULL;
+}
+
+static inline u32 __bnx2x_get_page_read_num(struct bnx2x *bp)
+{
+	if (CHIP_IS_E2(bp))
+		return PAGE_READ_REGS_E2;
+	else if (CHIP_IS_E3(bp))
+		return PAGE_READ_REGS_E3;
+	else
+		return 0;
+}
+
+static inline int __bnx2x_get_regs_len(struct bnx2x *bp)
+{
+	int num_pages = __bnx2x_get_page_reg_num(bp);
+	int page_write_num = __bnx2x_get_page_write_num(bp);
+	const struct reg_addr *page_read_addr = __bnx2x_get_page_read_ar(bp);
+	int page_read_num = __bnx2x_get_page_read_num(bp);
+	int regdump_len = 0;
+	int i, j, k;
+
+	for (i = 0; i < REGS_COUNT; i++)
+		if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+			regdump_len += reg_addrs[i].size;
+
+	for (i = 0; i < num_pages; i++)
+		for (j = 0; j < page_write_num; j++)
+			for (k = 0; k < page_read_num; k++)
+				if (bnx2x_is_reg_online(bp, &page_read_addr[k]))
+					regdump_len += page_read_addr[k].size;
+
+	return regdump_len;
+}
+
+static int bnx2x_get_regs_len(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int regdump_len = 0;
+
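+	/* __bnx2x_get_regs_len() counts online registers in dwords; the
+	 * resulting dump is a dump_hdr followed by 4 bytes per register.
+	 */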
+	regdump_len = __bnx2x_get_regs_len(bp);
+	regdump_len *= 4;
+	regdump_len += sizeof(struct dump_hdr);
+
+	return regdump_len;
+}
+
+/**
+ * bnx2x_read_pages_regs - read "paged" registers
+ *
+ * @bp:	device handle
+ * @p:		output buffer
+ *
+ * Reads "paged" memories: memories that may only be read by first writing to a
+ * specific address ("write address") and then reading from a specific address
+ * ("read address"). There may be more than one write address per "page" and
+ * more than one read address per write address.
+ */
+static inline void bnx2x_read_pages_regs(struct bnx2x *bp, u32 *p)
+{
+	u32 i, j, k, n;
+	/* addresses of the paged registers */
+	const u32 *page_addr = __bnx2x_get_page_addr_ar(bp);
+	/* number of paged registers */
+	int num_pages = __bnx2x_get_page_reg_num(bp);
+	/* write addresses */
+	const u32 *write_addr = __bnx2x_get_page_write_ar(bp);
+	/* number of write addresses */
+	int write_num = __bnx2x_get_page_write_num(bp);
+	/* read addresses info */
+	const struct reg_addr *read_addr = __bnx2x_get_page_read_ar(bp);
+	/* number of read addresses */
+	int read_num = __bnx2x_get_page_read_num(bp);
+
+	for (i = 0; i < num_pages; i++) {
+		for (j = 0; j < write_num; j++) {
+			REG_WR(bp, write_addr[j], page_addr[i]);
+			for (k = 0; k < read_num; k++)
+				if (bnx2x_is_reg_online(bp, &read_addr[k]))
+					for (n = 0; n <
+					      read_addr[k].size; n++)
+						*p++ = REG_RD(bp,
+						       read_addr[k].addr + n*4);
+		}
+	}
+}
+
+static inline void __bnx2x_get_regs(struct bnx2x *bp, u32 *p)
+{
+	u32 i, j;
+
+	/* Read the regular registers */
+	for (i = 0; i < REGS_COUNT; i++)
+		if (bnx2x_is_reg_online(bp, &reg_addrs[i]))
+			for (j = 0; j < reg_addrs[i].size; j++)
+				*p++ = REG_RD(bp, reg_addrs[i].addr + j*4);
+
+	/* Read "paged" registes */
+	bnx2x_read_pages_regs(bp, p);
+}
+
+static void bnx2x_get_regs(struct net_device *dev,
+			   struct ethtool_regs *regs, void *_p)
+{
+	u32 *p = _p;
+	struct bnx2x *bp = netdev_priv(dev);
+	struct dump_hdr dump_hdr = {0};
+
+	regs->version = 0;
+	memset(p, 0, regs->len);
+
+	if (!netif_running(bp->dev))
+		return;
+
+	/* Disable parity attentions while dumping, since the dump may
+	 * cause false alarms by reading registers that were never
+	 * written. Parity attentions are re-enabled right after the dump.
+	 */
+	bnx2x_disable_blocks_parity(bp);
+
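+	/* hdr_size counts the header dwords minus one; p is advanced by
+	 * hdr_size + 1 below to step over the whole header.
+	 */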
+	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
+	dump_hdr.dump_sign = dump_sign_all;
+	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
+	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
+	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
+	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
+
+	if (CHIP_IS_E1(bp))
+		dump_hdr.info = RI_E1_ONLINE;
+	else if (CHIP_IS_E1H(bp))
+		dump_hdr.info = RI_E1H_ONLINE;
+	else if (!CHIP_IS_E1x(bp))
+		dump_hdr.info = RI_E2_ONLINE |
+		(BP_PATH(bp) ? RI_PATH1_DUMP : RI_PATH0_DUMP);
+
+	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
+	p += dump_hdr.hdr_size + 1;
+
+	/* Actually read the registers */
+	__bnx2x_get_regs(bp, p);
+
+	/* Re-enable parity attentions */
+	bnx2x_clear_blocks_parity(bp);
+	bnx2x_enable_blocks_parity(bp);
+}
+
+static void bnx2x_get_drvinfo(struct net_device *dev,
+			      struct ethtool_drvinfo *info)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u8 phy_fw_ver[PHY_FW_VER_LEN];
+
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+
+	phy_fw_ver[0] = '\0';
+	if (bp->port.pmf) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_get_ext_phy_fw_version(&bp->link_params,
+					     (bp->state != BNX2X_STATE_CLOSED),
+					     phy_fw_ver, PHY_FW_VER_LEN);
+		bnx2x_release_phy_lock(bp);
+	}
+
+	strncpy(info->fw_version, bp->fw_ver, 32);
+	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
+		 "bc %d.%d.%d%s%s",
+		 (bp->common.bc_ver & 0xff0000) >> 16,
+		 (bp->common.bc_ver & 0xff00) >> 8,
+		 (bp->common.bc_ver & 0xff),
+		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
+	strcpy(info->bus_info, pci_name(bp->pdev));
+	info->n_stats = BNX2X_NUM_STATS;
+	info->testinfo_len = BNX2X_NUM_TESTS;
+	info->eedump_len = bp->common.flash_size;
+	info->regdump_len = bnx2x_get_regs_len(dev);
+}
+
+static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & NO_WOL_FLAG) {
+		wol->supported = 0;
+		wol->wolopts = 0;
+	} else {
+		wol->supported = WAKE_MAGIC;
+		if (bp->wol)
+			wol->wolopts = WAKE_MAGIC;
+		else
+			wol->wolopts = 0;
+	}
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	if (wol->wolopts & WAKE_MAGIC) {
+		if (bp->flags & NO_WOL_FLAG)
+			return -EINVAL;
+
+		bp->wol = 1;
+	} else
+		bp->wol = 0;
+
+	return 0;
+}
+
+static u32 bnx2x_get_msglevel(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->msg_enable;
+}
+
+static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (capable(CAP_NET_ADMIN)) {
+		/* dump MCP trace */
+		if (level & BNX2X_MSG_MCP)
+			bnx2x_fw_dump_lvl(bp, KERN_INFO);
+		bp->msg_enable = level;
+	}
+}
+
+static int bnx2x_nway_reset(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!bp->port.pmf)
+		return 0;
+
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+static u32 bnx2x_get_link(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
+		return 0;
+
+	return bp->link_vars.link_up;
+}
+
+static int bnx2x_get_eeprom_len(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	return bp->common.flash_size;
+}
+
+static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int count, i;
+	u32 val = 0;
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* request access to nvram interface */
+	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
+			break;
+
+		udelay(5);
+	}
+
+	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
+		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int bnx2x_release_nvram_lock(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int count, i;
+	u32 val = 0;
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* relinquish nvram interface */
+	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
+	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
+
+	for (i = 0; i < count*10; i++) {
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
+		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
+			break;
+
+		udelay(5);
+	}
+
+	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
+		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void bnx2x_enable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* enable both bits, even on read */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
+		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
+}
+
+static void bnx2x_disable_nvram_access(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
+
+	/* disable both bits, even after read */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
+	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
+			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
+}
+
+static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
+				  u32 cmd_flags)
+{
+	int count, i, rc;
+	u32 val;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+	/* address of the NVRAM to read from */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue a read command */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* wait for completion */
+	*ret_val = 0;
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
+			/* we read nvram data in cpu order,
+			 * but ethtool sees it as an array of bytes;
+			 * converting to big-endian does the work */
+			*ret_val = cpu_to_be32(val);
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
+			    int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	__be32 val;
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(BNX2X_MSG_NVM,
+		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	/* read the first word(s) */
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
+	while ((buf_size > sizeof(u32)) && (rc == 0)) {
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		ret_buf += sizeof(u32);
+		buf_size -= sizeof(u32);
+		cmd_flags = 0;
+	}
+
+	if (rc == 0) {
+		cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
+		memcpy(ret_buf, &val, 4);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_get_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_get_eeprom */
+
+	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
+				   u32 cmd_flags)
+{
+	int count, i, rc;
+
+	/* build the command word */
+	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
+
+	/* need to clear DONE bit separately */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
+
+	/* write the data */
+	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
+
+	/* address of the NVRAM to write to */
+	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
+	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
+
+	/* issue the write command */
+	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
+
+	/* adjust timeout for emulation/FPGA */
+	count = BNX2X_NVRAM_TIMEOUT_COUNT;
+	if (CHIP_REV_IS_SLOW(bp))
+		count *= 100;
+
+	/* wait for completion */
+	rc = -EBUSY;
+	for (i = 0; i < count; i++) {
+		udelay(5);
+		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
+		if (val & MCPR_NVM_COMMAND_DONE) {
+			rc = 0;
+			break;
+		}
+	}
+
+	return rc;
+}
+
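+/* Bit shift of the byte at 'offset' within its naturally aligned dword:
+ * e.g. offset 0x13 -> byte lane 3 -> shift of 24 bits.
+ */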
+#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
+
+static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			      int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 align_offset;
+	__be32 val;
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
+	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
+	align_offset = (offset & ~0x03);
+	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
+
+	if (rc == 0) {
+		val &= ~(0xff << BYTE_OFFSET(offset));
+		val |= (*data_buf << BYTE_OFFSET(offset));
+
+		/* nvram data is returned as an array of bytes;
+		 * convert it back to cpu order */
+		val = be32_to_cpu(val);
+
+		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
+					     cmd_flags);
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
+			     int buf_size)
+{
+	int rc;
+	u32 cmd_flags;
+	u32 val;
+	u32 written_so_far;
+
+	if (buf_size == 1)	/* single-byte write from ethtool */
+		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
+
+	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
+		DP(BNX2X_MSG_NVM,
+		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
+		   offset, buf_size);
+		return -EINVAL;
+	}
+
+	if (offset + buf_size > bp->common.flash_size) {
+		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
+				  " buf_size (0x%x) > flash_size (0x%x)\n",
+		   offset, buf_size, bp->common.flash_size);
+		return -EINVAL;
+	}
+
+	/* request access to nvram interface */
+	rc = bnx2x_acquire_nvram_lock(bp);
+	if (rc)
+		return rc;
+
+	/* enable access to nvram interface */
+	bnx2x_enable_nvram_access(bp);
+
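+	/* NVRAM is programmed page by page: the last dword of the buffer
+	 * and of each BNX2X_NVRAM_PAGE_SIZE page is flagged LAST, and the
+	 * first dword of each new page is flagged FIRST.
+	 */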
+	written_so_far = 0;
+	cmd_flags = MCPR_NVM_COMMAND_FIRST;
+	while ((written_so_far < buf_size) && (rc == 0)) {
+		if (written_so_far == (buf_size - sizeof(u32)))
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if (((offset + 4) % BNX2X_NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_LAST;
+		else if ((offset % BNX2X_NVRAM_PAGE_SIZE) == 0)
+			cmd_flags |= MCPR_NVM_COMMAND_FIRST;
+
+		memcpy(&val, data_buf, 4);
+
+		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
+
+		/* advance to the next dword */
+		offset += sizeof(u32);
+		data_buf += sizeof(u32);
+		written_so_far += sizeof(u32);
+		cmd_flags = 0;
+	}
+
+	/* disable access to nvram interface */
+	bnx2x_disable_nvram_access(bp);
+	bnx2x_release_nvram_lock(bp);
+
+	return rc;
+}
+
+static int bnx2x_set_eeprom(struct net_device *dev,
+			    struct ethtool_eeprom *eeprom, u8 *eebuf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int port = BP_PORT(bp);
+	int rc = 0;
+	u32 ext_phy_config;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
+	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
+	   eeprom->len, eeprom->len);
+
+	/* parameters already validated in ethtool_set_eeprom */
+
+	/* PHY eeprom can be accessed only by the PMF */
+	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
+	    !bp->port.pmf)
+		return -EINVAL;
+
+	ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+
+	if (eeprom->magic == 0x50485950) {
+		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+		bnx2x_acquire_phy_lock(bp);
+		rc |= bnx2x_link_reset(&bp->link_params,
+				       &bp->link_vars, 0);
+		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+				       MISC_REGISTERS_GPIO_HIGH, port);
+		bnx2x_release_phy_lock(bp);
+		bnx2x_link_report(bp);
+
+	} else if (eeprom->magic == 0x50485952) {
+		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
+		if (bp->state == BNX2X_STATE_OPEN) {
+			bnx2x_acquire_phy_lock(bp);
+			rc |= bnx2x_link_reset(&bp->link_params,
+					       &bp->link_vars, 1);
+
+			rc |= bnx2x_phy_init(&bp->link_params,
+					     &bp->link_vars);
+			bnx2x_release_phy_lock(bp);
+			bnx2x_calc_fc_adv(bp);
+		}
+	} else if (eeprom->magic == 0x53985943) {
+		/* 'PHYC' (0x50485943): PHY FW upgrade completed */
+		if (XGXS_EXT_PHY_TYPE(ext_phy_config) ==
+				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
+
+			/* DSP Remove Download Mode */
+			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+				       MISC_REGISTERS_GPIO_LOW, port);
+
+			bnx2x_acquire_phy_lock(bp);
+
+			bnx2x_sfx7101_sp_sw_reset(bp,
+						&bp->link_params.phy[EXT_PHY1]);
+
+			/* wait 0.5 sec to allow it to run */
+			msleep(500);
+			bnx2x_ext_phy_hw_reset(bp, port);
+			msleep(500);
+			bnx2x_release_phy_lock(bp);
+		}
+	} else
+		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
+
+	return rc;
+}
+
+static int bnx2x_get_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	memset(coal, 0, sizeof(struct ethtool_coalesce));
+
+	coal->rx_coalesce_usecs = bp->rx_ticks;
+	coal->tx_coalesce_usecs = bp->tx_ticks;
+
+	return 0;
+}
+
+static int bnx2x_set_coalesce(struct net_device *dev,
+			      struct ethtool_coalesce *coal)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
+	if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
+		bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+	bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
+	if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
+		bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
+
+	if (netif_running(dev))
+		bnx2x_update_coalesce(bp);
+
+	return 0;
+}
+
+static void bnx2x_get_ringparam(struct net_device *dev,
+				struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	ering->rx_max_pending = MAX_RX_AVAIL;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+
+	if (bp->rx_ring_size)
+		ering->rx_pending = bp->rx_ring_size;
+	else
+		if (bp->state == BNX2X_STATE_OPEN && bp->num_queues)
+			ering->rx_pending = MAX_RX_AVAIL/bp->num_queues;
+		else
+			ering->rx_pending = MAX_RX_AVAIL;
+
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+
+	ering->tx_max_pending = MAX_TX_AVAIL;
+	ering->tx_pending = bp->tx_ring_size;
+}
+
+static int bnx2x_set_ringparam(struct net_device *dev,
+			       struct ethtool_ringparam *ering)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		return -EAGAIN;
+	}
+
+	if ((ering->rx_pending > MAX_RX_AVAIL) ||
+	    (ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
+						    MIN_RX_SIZE_TPA)) ||
+	    (ering->tx_pending > MAX_TX_AVAIL) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+		return -EINVAL;
+
+	bp->rx_ring_size = ering->rx_pending;
+	bp->tx_ring_size = ering->tx_pending;
+
+	return bnx2x_reload_if_running(dev);
+}
+
+static void bnx2x_get_pauseparam(struct net_device *dev,
+				 struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+	epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
+			   BNX2X_FLOW_CTRL_AUTO);
+
+	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
+			    BNX2X_FLOW_CTRL_RX);
+	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
+			    BNX2X_FLOW_CTRL_TX);
+
+	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+}
+
+static int bnx2x_set_pauseparam(struct net_device *dev,
+				struct ethtool_pauseparam *epause)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+
+	if (IS_MF(bp))
+		return 0;
+
+	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
+	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
+
+	bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO;
+
+	if (epause->rx_pause)
+		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_RX;
+
+	if (epause->tx_pause)
+		bp->link_params.req_flow_ctrl[cfg_idx] |= BNX2X_FLOW_CTRL_TX;
+
+	if (bp->link_params.req_flow_ctrl[cfg_idx] == BNX2X_FLOW_CTRL_AUTO)
+		bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_NONE;
+
+	if (epause->autoneg) {
+		if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
+			DP(NETIF_MSG_LINK, "autoneg not supported\n");
+			return -EINVAL;
+		}
+
+		if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG) {
+			bp->link_params.req_flow_ctrl[cfg_idx] =
+				BNX2X_FLOW_CTRL_AUTO;
+		}
+	}
+
+	DP(NETIF_MSG_LINK,
+	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
+
+	if (netif_running(dev)) {
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+		bnx2x_link_set(bp);
+	}
+
+	return 0;
+}
+
+static const struct {
+	char string[ETH_GSTRING_LEN];
+} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
+	{ "register_test (offline)" },
+	{ "memory_test (offline)" },
+	{ "loopback_test (offline)" },
+	{ "nvram_test (online)" },
+	{ "interrupt_test (online)" },
+	{ "link_test (online)" },
+	{ "idle check (online)" }
+};
+
+enum {
+	BNX2X_CHIP_E1_OFST = 0,
+	BNX2X_CHIP_E1H_OFST,
+	BNX2X_CHIP_E2_OFST,
+	BNX2X_CHIP_E3_OFST,
+	BNX2X_CHIP_E3B0_OFST,
+	BNX2X_CHIP_MAX_OFST
+};
+
+#define BNX2X_CHIP_MASK_E1	(1 << BNX2X_CHIP_E1_OFST)
+#define BNX2X_CHIP_MASK_E1H	(1 << BNX2X_CHIP_E1H_OFST)
+#define BNX2X_CHIP_MASK_E2	(1 << BNX2X_CHIP_E2_OFST)
+#define BNX2X_CHIP_MASK_E3	(1 << BNX2X_CHIP_E3_OFST)
+#define BNX2X_CHIP_MASK_E3B0	(1 << BNX2X_CHIP_E3B0_OFST)
+
+#define BNX2X_CHIP_MASK_ALL	((1 << BNX2X_CHIP_MAX_OFST) - 1)
+#define BNX2X_CHIP_MASK_E1X	(BNX2X_CHIP_MASK_E1 | BNX2X_CHIP_MASK_E1H)
+
+static int bnx2x_test_registers(struct bnx2x *bp)
+{
+	int idx, i, rc = -ENODEV;
+	u32 wr_val = 0, hw;
+	int port = BP_PORT(bp);
+	static const struct {
+		u32 hw;		/* BNX2X_CHIP_MASK_* the entry applies to */
+		u32 offset0;	/* port 0 register address */
+		u32 offset1;	/* per-port stride (offset0 + port*offset1) */
+		u32 mask;	/* compare mask for the read-back value */
+	} reg_tbl[] = {
+/* 0 */		{ BNX2X_CHIP_MASK_ALL,
+			BRB1_REG_PAUSE_LOW_THRESHOLD_0,	4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			DORQ_REG_DB_ADDR0,		4, 0xffffffff },
+		{ BNX2X_CHIP_MASK_E1X,
+			HC_REG_AGG_INT_0,		4, 0x000003ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PBF_REG_MAC_IF0_ENABLE,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2 | BNX2X_CHIP_MASK_E3,
+			PBF_REG_P0_INIT_CRD,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_E3B0,
+			PBF_REG_INIT_CRD_Q0,		4, 0x000007ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PRS_REG_CID_PORT_0,		4, 0x00ffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_CDU0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TM0_L2P,		4, 0x000fffff },
+/* 10 */	{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			PXP2_REG_PSWRQ_TSDM0_L2P,	4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			QM_REG_CONNNUM_0,		4, 0x000fffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			TM_REG_LIN0_MAX_ACTIVE_CID,	4, 0x0003ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_0,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			SRC_REG_KEYRSS0_7,		40, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_WU_DA_CNT_CMD00,	4, 0x00000003 },
+		{ BNX2X_CHIP_MASK_ALL,
+			XCM_REG_GLB_DEL_ACK_MAX_CNT_0,	4, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_T_BIT,		4, 0x00000001 },
+/* 20 */	{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_EMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_BMAC0_IN_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_XCM0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_BRB0_OUT_EN,		4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_XCM_MASK,		4, 0x00000007 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_6_LEN,	68, 0x000000ff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_ACPI_PAT_0_CRC,	68, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_MAC_0_0,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_IP_0_1,	160, 0xffffffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_IPV4_IPV6_0,	160, 0x00000001 },
+/* 30 */	{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_UDP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_DEST_TCP_0,	160, 0x0000ffff },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LLH0_VLAN_ID_0,	160, 0x00000fff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS_SERDES0_MODE_SEL,	4, 0x00000001 },
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001},
+		{ BNX2X_CHIP_MASK_ALL,
+			NIG_REG_STATUS_INTERRUPT_PORT0,	4, 0x07ffffff },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
+		{ BNX2X_CHIP_MASK_E1X | BNX2X_CHIP_MASK_E2,
+			NIG_REG_SERDES0_CTRL_PHY_ADDR,	16, 0x0000001f },
+
+		{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
+	};
+
+	if (!netif_running(bp->dev))
+		return rc;
+
+	if (CHIP_IS_E1(bp))
+		hw = BNX2X_CHIP_MASK_E1;
+	else if (CHIP_IS_E1H(bp))
+		hw = BNX2X_CHIP_MASK_E1H;
+	else if (CHIP_IS_E2(bp))
+		hw = BNX2X_CHIP_MASK_E2;
+	else if (CHIP_IS_E3B0(bp))
+		hw = BNX2X_CHIP_MASK_E3B0;
+	else /* e3 A0 */
+		hw = BNX2X_CHIP_MASK_E3;
+
+	/* Repeat the test twice:
+	 * first by writing 0x00000000, then by writing 0xffffffff
+	 */
+	for (idx = 0; idx < 2; idx++) {
+
+		switch (idx) {
+		case 0:
+			wr_val = 0;
+			break;
+		case 1:
+			wr_val = 0xffffffff;
+			break;
+		}
+
+		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
+			u32 offset, mask, save_val, val;
+			if (!(hw & reg_tbl[i].hw))
+				continue;
+
+			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
+			mask = reg_tbl[i].mask;
+
+			save_val = REG_RD(bp, offset);
+
+			REG_WR(bp, offset, wr_val & mask);
+
+			val = REG_RD(bp, offset);
+
+			/* Restore the original register's value */
+			REG_WR(bp, offset, save_val);
+
+			/* verify value is as expected */
+			if ((val & mask) != (wr_val & mask)) {
+				DP(NETIF_MSG_HW,
+				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
+				   offset, val, wr_val, mask);
+				goto test_reg_exit;
+			}
+		}
+	}
+
+	rc = 0;
+
+test_reg_exit:
+	return rc;
+}
+
+static int bnx2x_test_memory(struct bnx2x *bp)
+{
+	int i, j, rc = -ENODEV;
+	u32 val, index;
+	static const struct {
+		u32 offset;
+		int size;
+	} mem_tbl[] = {
+		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
+		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
+		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
+		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
+		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
+		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
+		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
+
+		{ 0xffffffff, 0 }
+	};
+
+	static const struct {
+		char *name;
+		u32 offset;
+		u32 hw_mask[BNX2X_CHIP_MAX_OFST];
+	} prty_tbl[] = {
+		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,
+			{0x2,     0x2, 0, 0} },
+		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS,
+			{0,       0,   0, 0} },
+		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,
+			{0x3ffc0, 0,   0, 0} },
+		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,
+			{0x3ffc1, 0,   0, 0} },
+
+		{ NULL, 0xffffffff, {0, 0, 0, 0} }
+	};
+
+	if (!netif_running(bp->dev))
+		return rc;
+
+	if (CHIP_IS_E1(bp))
+		index = BNX2X_CHIP_E1_OFST;
+	else if (CHIP_IS_E1H(bp))
+		index = BNX2X_CHIP_E1H_OFST;
+	else if (CHIP_IS_E2(bp))
+		index = BNX2X_CHIP_E2_OFST;
+	else /* e3 */
+		index = BNX2X_CHIP_E3_OFST;
+
+	/* Pre-check the parity status */
+	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+		val = REG_RD(bp, prty_tbl[i].offset);
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
+			DP(NETIF_MSG_HW,
+			   "%s is 0x%x\n", prty_tbl[i].name, val);
+			goto test_mem_exit;
+		}
+	}
+
+	/* Go through all the memories */
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
+		for (j = 0; j < mem_tbl[i].size; j++)
+			REG_RD(bp, mem_tbl[i].offset + j*4);
+
+	/* Check the parity status */
+	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
+		val = REG_RD(bp, prty_tbl[i].offset);
+		if (val & ~(prty_tbl[i].hw_mask[index])) {
+			DP(NETIF_MSG_HW,
+			   "%s is 0x%x\n", prty_tbl[i].name, val);
+			goto test_mem_exit;
+		}
+	}
+
+	rc = 0;
+
+test_mem_exit:
+	return rc;
+}
+
+static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
+{
+	int cnt = 1400;
+
+	if (link_up) {
+		while (bnx2x_link_test(bp, is_serdes) && cnt--)
+			msleep(20);
+
+		if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
+			DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+	}
+}
+
+static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
+{
+	unsigned int pkt_size, num_pkts, i;
+	struct sk_buff *skb;
+	unsigned char *packet;
+	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
+	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
+	struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
+	u16 tx_start_idx, tx_idx;
+	u16 rx_start_idx, rx_idx;
+	u16 pkt_prod, bd_prod, rx_comp_cons;
+	struct sw_tx_bd *tx_buf;
+	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
+	struct eth_tx_parse_bd_e2  *pbd_e2 = NULL;
+	dma_addr_t mapping;
+	union eth_rx_cqe *cqe;
+	u8 cqe_fp_flags, cqe_fp_type;
+	struct sw_rx_bd *rx_buf;
+	u16 len;
+	int rc = -ENODEV;
+
+	/* check the loopback mode */
+	switch (loopback_mode) {
+	case BNX2X_PHY_LOOPBACK:
+		if (bp->link_params.loopback_mode != LOOPBACK_XGXS)
+			return -EINVAL;
+		break;
+	case BNX2X_MAC_LOOPBACK:
+		bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
+						LOOPBACK_XMAC : LOOPBACK_BMAC;
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* prepare the loopback packet */
+	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
+		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
+	skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
+	if (!skb) {
+		rc = -ENOMEM;
+		goto test_loopback_exit;
+	}
+	packet = skb_put(skb, pkt_size);
+	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
+	memset(packet + ETH_ALEN, 0, ETH_ALEN);
+	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
+	for (i = ETH_HLEN; i < pkt_size; i++)
+		packet[i] = (unsigned char) (i & 0xff);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		rc = -ENOMEM;
+		dev_kfree_skb(skb);
+		BNX2X_ERR("Unable to map SKB\n");
+		goto test_loopback_exit;
+	}
+
+	/* send the loopback packet */
+	num_pkts = 0;
+	tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
+	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+
+	pkt_prod = txdata->tx_pkt_prod++;
+	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
+	tx_buf->first_bd = txdata->tx_bd_prod;
+	tx_buf->skb = skb;
+	tx_buf->flags = 0;
+
+	bd_prod = TX_BD(txdata->tx_bd_prod);
+	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
+	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
+	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
+	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
+	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
+	tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+	SET_FLAG(tx_start_bd->general_data,
+		 ETH_TX_START_BD_ETH_ADDR_TYPE,
+		 UNICAST_ADDRESS);
+	SET_FLAG(tx_start_bd->general_data,
+		 ETH_TX_START_BD_HDR_NBDS,
+		 1);
+
+	/* turn on parsing and get a BD */
+	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
+
+	pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
+	pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
+
+	memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
+	memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
+
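+	/* Make sure the BD and parsing BD writes above reach memory before
+	 * the producer update and doorbell below; this mirrors the
+	 * wmb()/DOORBELL/mmiowb() sequence of the regular xmit path.
+	 */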
+	wmb();
+
+	txdata->tx_db.data.prod += 2;
+	barrier();
+	DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
+
+	mmiowb();
+	barrier();
+
+	num_pkts++;
+	txdata->tx_bd_prod += 2; /* start + pbd */
+
+	udelay(100);
+
+	tx_idx = le16_to_cpu(*txdata->tx_cons_sb);
+	if (tx_idx != tx_start_idx + num_pkts)
+		goto test_loopback_exit;
+
+	/* Unlike the HC, the IGU won't generate an interrupt for status
+	 * block updates that were performed while interrupts were
+	 * disabled.
+	 */
+	if (bp->common.int_block == INT_BLOCK_IGU) {
+		/* Disable local BHs to prevent a deadlock between
+		 * sch_direct_xmit() and bnx2x_run_loopback() (which calls
+		 * bnx2x_tx_int()), as both take netif_tx_lock().
+		 */
+		local_bh_disable();
+		bnx2x_tx_int(bp, txdata);
+		local_bh_enable();
+	}
+
+	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto test_loopback_exit;
+
+	rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
+	cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
+	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+	cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
+	if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
+		goto test_loopback_rx_exit;
+
+	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+	if (len != pkt_size)
+		goto test_loopback_rx_exit;
+
+	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
+	dma_sync_single_for_cpu(&bp->pdev->dev,
+				   dma_unmap_addr(rx_buf, mapping),
+				   fp_rx->rx_buf_size, DMA_FROM_DEVICE);
+	skb = rx_buf->skb;
+	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+	for (i = ETH_HLEN; i < pkt_size; i++)
+		if (*(skb->data + i) != (unsigned char) (i & 0xff))
+			goto test_loopback_rx_exit;
+
+	rc = 0;
+
+test_loopback_rx_exit:
+
+	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
+	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
+	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
+	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
+
+	/* Update producers */
+	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
+			     fp_rx->rx_sge_prod);
+
+test_loopback_exit:
+	bp->link_params.loopback_mode = LOOPBACK_NONE;
+
+	return rc;
+}
+
+static int bnx2x_test_loopback(struct bnx2x *bp)
+{
+	int rc = 0, res;
+
+	if (BP_NOMCP(bp))
+		return rc;
+
+	if (!netif_running(bp->dev))
+		return BNX2X_LOOPBACK_FAILED;
+
+	bnx2x_netif_stop(bp, 1);
+	bnx2x_acquire_phy_lock(bp);
+
+	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
+	if (res) {
+		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
+		rc |= BNX2X_PHY_LOOPBACK_FAILED;
+	}
+
+	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
+	if (res) {
+		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
+		rc |= BNX2X_MAC_LOOPBACK_FAILED;
+	}
+
+	bnx2x_release_phy_lock(bp);
+	bnx2x_netif_start(bp);
+
+	return rc;
+}
+
+#define CRC32_RESIDUAL			0xdebb20e3
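+/* ether_crc_le() is a reflected CRC32 seeded with ~0 and no final XOR, so
+ * running it over a region whose last four bytes hold the inverted,
+ * little-endian CRC of the preceding bytes yields this fixed residual
+ * regardless of the data; each nvram_tbl[] region below is assumed to be
+ * laid out that way.
+ */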
+
+static int bnx2x_test_nvram(struct bnx2x *bp)
+{
+	static const struct {
+		int offset;
+		int size;
+	} nvram_tbl[] = {
+		{     0,  0x14 }, /* bootstrap */
+		{  0x14,  0xec }, /* dir */
+		{ 0x100, 0x350 }, /* manuf_info */
+		{ 0x450,  0xf0 }, /* feature_info */
+		{ 0x640,  0x64 }, /* upgrade_key_info */
+		{ 0x708,  0x70 }, /* manuf_key_info */
+		{     0,     0 }
+	};
+	__be32 buf[0x350 / 4];
+	u8 *data = (u8 *)buf;
+	int i, rc;
+	u32 magic, crc;
+
+	if (BP_NOMCP(bp))
+		return 0;
+
+	rc = bnx2x_nvram_read(bp, 0, data, 4);
+	if (rc) {
+		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
+		goto test_nvram_exit;
+	}
+
+	magic = be32_to_cpu(buf[0]);
+	if (magic != 0x669955aa) {
+		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
+		rc = -ENODEV;
+		goto test_nvram_exit;
+	}
+
+	for (i = 0; nvram_tbl[i].size; i++) {
+
+		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
+				      nvram_tbl[i].size);
+		if (rc) {
+			DP(NETIF_MSG_PROBE,
+			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
+			goto test_nvram_exit;
+		}
+
+		crc = ether_crc_le(nvram_tbl[i].size, data);
+		if (crc != CRC32_RESIDUAL) {
+			DP(NETIF_MSG_PROBE,
+			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
+			rc = -ENODEV;
+			goto test_nvram_exit;
+		}
+	}
+
+test_nvram_exit:
+	return rc;
+}
+
+/* Send an EMPTY ramrod on the first queue */
+static int bnx2x_test_intr(struct bnx2x *bp)
+{
+	struct bnx2x_queue_state_params params = {0};
+
+	if (!netif_running(bp->dev))
+		return -ENODEV;
+
+	params.q_obj = &bp->fp->q_obj;
+	params.cmd = BNX2X_Q_CMD_EMPTY;
+
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	return bnx2x_queue_state_change(bp, &params);
+}
+
+static void bnx2x_self_test(struct net_device *dev,
+			    struct ethtool_test *etest, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u8 is_serdes;
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
+		etest->flags |= ETH_TEST_FL_FAILED;
+		return;
+	}
+
+	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
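+	/* buf[] slots: 0 registers, 1 memory, 2 loopback, 3 nvram,
+	 * 4 interrupt, 5 link - presumably in the same order as
+	 * bnx2x_tests_str_arr.
+	 */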
+
+	if (!netif_running(dev))
+		return;
+
+	/* offline tests are not supported in MF mode */
+	if (IS_MF(bp))
+		etest->flags &= ~ETH_TEST_FL_OFFLINE;
+	is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0;
+
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		int port = BP_PORT(bp);
+		u32 val;
+		u8 link_up;
+
+		/* save current value of input enable for TX port IF */
+		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
+		/* disable input for TX port IF */
+		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
+
+		link_up = bp->link_vars.link_up;
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		bnx2x_nic_load(bp, LOAD_DIAG);
+		/* wait until link state is restored */
+		bnx2x_wait_for_link(bp, 1, is_serdes);
+
+		if (bnx2x_test_registers(bp) != 0) {
+			buf[0] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+		if (bnx2x_test_memory(bp) != 0) {
+			buf[1] = 1;
+			etest->flags |= ETH_TEST_FL_FAILED;
+		}
+
+		buf[2] = bnx2x_test_loopback(bp);
+		if (buf[2] != 0)
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+
+		/* restore input for TX port IF */
+		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
+
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+		/* wait until link state is restored */
+		bnx2x_wait_for_link(bp, link_up, is_serdes);
+	}
+	if (bnx2x_test_nvram(bp) != 0) {
+		buf[3] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+	if (bnx2x_test_intr(bp) != 0) {
+		buf[4] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+	if (bnx2x_link_test(bp, is_serdes) != 0) {
+		buf[5] = 1;
+		etest->flags |= ETH_TEST_FL_FAILED;
+	}
+
+#ifdef BNX2X_EXTRA_DEBUG
+	bnx2x_panic_dump(bp);
+#endif
+}
+
+#define IS_PORT_STAT(i) \
+	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
+#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
+#define IS_MF_MODE_STAT(bp) \
+			(IS_MF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
+
+/* ethtool statistics are displayed for all regular ethernet queues only;
+ * the FCoE L2 queue is not included here
+ */
+static inline int bnx2x_num_stat_queues(struct bnx2x *bp)
+{
+	return BNX2X_NUM_ETH_QUEUES(bp);
+}
+
+static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i, num_stats;
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		if (is_multi(bp)) {
+			num_stats = bnx2x_num_stat_queues(bp) *
+				BNX2X_NUM_Q_STATS;
+			if (!IS_MF_MODE_STAT(bp))
+				num_stats += BNX2X_NUM_STATS;
+		} else {
+			if (IS_MF_MODE_STAT(bp)) {
+				num_stats = 0;
+				for (i = 0; i < BNX2X_NUM_STATS; i++)
+					if (IS_FUNC_STAT(i))
+						num_stats++;
+			} else
+				num_stats = BNX2X_NUM_STATS;
+		}
+		return num_stats;
+
+	case ETH_SS_TEST:
+		return BNX2X_NUM_TESTS;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i, j, k;
+	char queue_name[MAX_QUEUE_NAME_LEN+1];
+
+	switch (stringset) {
+	case ETH_SS_STATS:
+		if (is_multi(bp)) {
+			k = 0;
+			for_each_eth_queue(bp, i) {
+				memset(queue_name, 0, sizeof(queue_name));
+				sprintf(queue_name, "%d", i);
+				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
+					snprintf(buf + (k + j)*ETH_GSTRING_LEN,
+						ETH_GSTRING_LEN,
+						bnx2x_q_stats_arr[j].string,
+						queue_name);
+				k += BNX2X_NUM_Q_STATS;
+			}
+			if (IS_MF_MODE_STAT(bp))
+				break;
+			for (j = 0; j < BNX2X_NUM_STATS; j++)
+				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+				       bnx2x_stats_arr[j].string);
+		} else {
+			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+				if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+					continue;
+				strcpy(buf + j*ETH_GSTRING_LEN,
+				       bnx2x_stats_arr[i].string);
+				j++;
+			}
+		}
+		break;
+
+	case ETH_SS_TEST:
+		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
+		break;
+	}
+}
+
+static void bnx2x_get_ethtool_stats(struct net_device *dev,
+				    struct ethtool_stats *stats, u64 *buf)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 *hw_stats, *offset;
+	int i, j, k;
+
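+	/* Stats are exported as u64: 4-byte HW counters widen directly,
+	 * 8-byte ones are stored hi dword first and are recombined with
+	 * HILO_U64().
+	 */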
+	if (is_multi(bp)) {
+		k = 0;
+		for_each_eth_queue(bp, i) {
+			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
+			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
+				if (bnx2x_q_stats_arr[j].size == 0) {
+					/* skip this counter */
+					buf[k + j] = 0;
+					continue;
+				}
+				offset = (hw_stats +
+					  bnx2x_q_stats_arr[j].offset);
+				if (bnx2x_q_stats_arr[j].size == 4) {
+					/* 4-byte counter */
+					buf[k + j] = (u64) *offset;
+					continue;
+				}
+				/* 8-byte counter */
+				buf[k + j] = HILO_U64(*offset, *(offset + 1));
+			}
+			k += BNX2X_NUM_Q_STATS;
+		}
+		if (IS_MF_MODE_STAT(bp))
+			return;
+		hw_stats = (u32 *)&bp->eth_stats;
+		for (j = 0; j < BNX2X_NUM_STATS; j++) {
+			if (bnx2x_stats_arr[j].size == 0) {
+				/* skip this counter */
+				buf[k + j] = 0;
+				continue;
+			}
+			offset = (hw_stats + bnx2x_stats_arr[j].offset);
+			if (bnx2x_stats_arr[j].size == 4) {
+				/* 4-byte counter */
+				buf[k + j] = (u64) *offset;
+				continue;
+			}
+			/* 8-byte counter */
+			buf[k + j] = HILO_U64(*offset, *(offset + 1));
+		}
+	} else {
+		hw_stats = (u32 *)&bp->eth_stats;
+		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+			if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+				continue;
+			if (bnx2x_stats_arr[i].size == 0) {
+				/* skip this counter */
+				buf[j] = 0;
+				j++;
+				continue;
+			}
+			offset = (hw_stats + bnx2x_stats_arr[i].offset);
+			if (bnx2x_stats_arr[i].size == 4) {
+				/* 4-byte counter */
+				buf[j] = (u64) *offset;
+				j++;
+				continue;
+			}
+			/* 8-byte counter */
+			buf[j] = HILO_U64(*offset, *(offset + 1));
+			j++;
+		}
+	}
+}
+
+static int bnx2x_set_phys_id(struct net_device *dev,
+			     enum ethtool_phys_id_state state)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	if (!bp->port.pmf)
+		return -EOPNOTSUPP;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+
+	case ETHTOOL_ID_ON:
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_ON, SPEED_1000);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_FRONT_PANEL_OFF, 0);
+
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		bnx2x_set_led(&bp->link_params, &bp->link_vars,
+			      LED_MODE_OPER,
+			      bp->link_vars.line_speed);
+	}
+
+	return 0;
+}
+
+static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+			   void *rules __always_unused)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = BNX2X_NUM_ETH_QUEUES(bp);
+		return 0;
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev,
+				struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t copy_size =
+		min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	size_t i;
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	/* Get the current configuration of the RSS indirection table */
+	bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
+
+	/*
+	 * We can't use memcpy() here because the internal storage of the
+	 * indirection table is a u8 array while indir->ring_index points
+	 * to an array of u32.
+	 *
+	 * The indirection table contains FW Client IDs, so the returned
+	 * table must be aligned to the Client ID of the leading RSS
+	 * queue.
+	 */
+	for (i = 0; i < copy_size; i++)
+		indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
+
+	indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+
+	return 0;
+}
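+
+/*
+ * A worked example of the translation above, with hypothetical numbers: if
+ * the leading RSS queue has FW client ID 17 and the table entry read from
+ * the device is 19, the index reported to ethtool is 19 - 17 = 2, i.e. the
+ * third ethernet queue; bnx2x_set_rxfh_indir() below applies the inverse
+ * mapping before programming the device.
+ */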
+
+static int bnx2x_set_rxfh_indir(struct net_device *dev,
+				const struct ethtool_rxfh_indir *indir)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	size_t i;
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
+		return -EOPNOTSUPP;
+
+	/* validate the size */
+	if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
+		return -EINVAL;
+
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		/* validate the indices */
+		if (indir->ring_index[i] >= num_eth_queues)
+			return -EINVAL;
+		/*
+		 * As in bnx2x_get_rxfh_indir(): we can't use memcpy() because
+		 * the internal storage of the indirection table is a u8 array
+		 * while indir->ring_index points to an array of u32.
+		 *
+		 * The indirection table contains FW Client IDs, so the
+		 * received table must be aligned to the Client ID of the
+		 * leading RSS queue.
+		 */
+		ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+	}
+
+	return bnx2x_config_rss_pf(bp, ind_table, false);
+}
+
+static const struct ethtool_ops bnx2x_ethtool_ops = {
+	.get_settings		= bnx2x_get_settings,
+	.set_settings		= bnx2x_set_settings,
+	.get_drvinfo		= bnx2x_get_drvinfo,
+	.get_regs_len		= bnx2x_get_regs_len,
+	.get_regs		= bnx2x_get_regs,
+	.get_wol		= bnx2x_get_wol,
+	.set_wol		= bnx2x_set_wol,
+	.get_msglevel		= bnx2x_get_msglevel,
+	.set_msglevel		= bnx2x_set_msglevel,
+	.nway_reset		= bnx2x_nway_reset,
+	.get_link		= bnx2x_get_link,
+	.get_eeprom_len		= bnx2x_get_eeprom_len,
+	.get_eeprom		= bnx2x_get_eeprom,
+	.set_eeprom		= bnx2x_set_eeprom,
+	.get_coalesce		= bnx2x_get_coalesce,
+	.set_coalesce		= bnx2x_set_coalesce,
+	.get_ringparam		= bnx2x_get_ringparam,
+	.set_ringparam		= bnx2x_set_ringparam,
+	.get_pauseparam		= bnx2x_get_pauseparam,
+	.set_pauseparam		= bnx2x_set_pauseparam,
+	.self_test		= bnx2x_self_test,
+	.get_sset_count		= bnx2x_get_sset_count,
+	.get_strings		= bnx2x_get_strings,
+	.set_phys_id		= bnx2x_set_phys_id,
+	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
+	.get_rxnfc		= bnx2x_get_rxnfc,
+	.get_rxfh_indir		= bnx2x_get_rxfh_indir,
+	.set_rxfh_indir		= bnx2x_set_rxfh_indir,
+};
+
+void bnx2x_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
new file mode 100644
index 0000000..998652a
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -0,0 +1,410 @@
+/* bnx2x_fw_defs.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef BNX2X_FW_DEFS_H
+#define BNX2X_FW_DEFS_H
+
+#define CSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[148].base)
+#define CSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[147].base + ((assertListEntry) * IRO[147].m1))
+#define CSTORM_EVENT_RING_DATA_OFFSET(pfId) \
+	(IRO[153].base + (((pfId)>>1) * IRO[153].m1) + (((pfId)&1) * \
+	IRO[153].m2))
+#define CSTORM_EVENT_RING_PROD_OFFSET(pfId) \
+	(IRO[154].base + (((pfId)>>1) * IRO[154].m1) + (((pfId)&1) * \
+	IRO[154].m2))
+#define CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(funcId) \
+	(IRO[159].base + ((funcId) * IRO[159].m1))
+#define CSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[149].base + ((funcId) * IRO[149].m1))
+#define CSTORM_IGU_MODE_OFFSET (IRO[157].base)
+#define CSTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+	(IRO[315].base + ((pfId) * IRO[315].m1))
+#define CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[316].base + ((pfId) * IRO[316].m1))
+#define CSTORM_ISCSI_EQ_CONS_OFFSET(pfId, iscsiEqId) \
+	(IRO[308].base + ((pfId) * IRO[308].m1) + ((iscsiEqId) * IRO[308].m2))
+#define CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[310].base + ((pfId) * IRO[310].m1) + ((iscsiEqId) * IRO[310].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfId, iscsiEqId) \
+	(IRO[309].base + ((pfId) * IRO[309].m1) + ((iscsiEqId) * IRO[309].m2))
+#define CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfId, iscsiEqId) \
+	(IRO[311].base + ((pfId) * IRO[311].m1) + ((iscsiEqId) * IRO[311].m2))
+#define CSTORM_ISCSI_EQ_PROD_OFFSET(pfId, iscsiEqId) \
+	(IRO[307].base + ((pfId) * IRO[307].m1) + ((iscsiEqId) * IRO[307].m2))
+#define CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfId, iscsiEqId) \
+	(IRO[313].base + ((pfId) * IRO[313].m1) + ((iscsiEqId) * IRO[313].m2))
+#define CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfId, iscsiEqId) \
+	(IRO[312].base + ((pfId) * IRO[312].m1) + ((iscsiEqId) * IRO[312].m2))
+#define CSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+	(IRO[314].base + ((pfId) * IRO[314].m1))
+#define CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[306].base + ((pfId) * IRO[306].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[305].base + ((pfId) * IRO[305].m1))
+#define CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[304].base + ((pfId) * IRO[304].m1))
+#define CSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[151].base + ((funcId) * IRO[151].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(pfId) \
+	(IRO[142].base + ((pfId) * IRO[142].m1))
+#define CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(pfId) \
+	(IRO[143].base + ((pfId) * IRO[143].m1))
+#define CSTORM_SP_STATUS_BLOCK_OFFSET(pfId) \
+	(IRO[141].base + ((pfId) * IRO[141].m1))
+#define CSTORM_SP_STATUS_BLOCK_SIZE (IRO[141].size)
+#define CSTORM_SP_SYNC_BLOCK_OFFSET(pfId) \
+	(IRO[144].base + ((pfId) * IRO[144].m1))
+#define CSTORM_SP_SYNC_BLOCK_SIZE (IRO[144].size)
+#define CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(sbId, hcIndex) \
+	(IRO[136].base + ((sbId) * IRO[136].m1) + ((hcIndex) * IRO[136].m2))
+#define CSTORM_STATUS_BLOCK_DATA_OFFSET(sbId) \
+	(IRO[133].base + ((sbId) * IRO[133].m1))
+#define CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(sbId) \
+	(IRO[134].base + ((sbId) * IRO[134].m1))
+#define CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(sbId, hcIndex) \
+	(IRO[135].base + ((sbId) * IRO[135].m1) + ((hcIndex) * IRO[135].m2))
+#define CSTORM_STATUS_BLOCK_OFFSET(sbId) \
+	(IRO[132].base + ((sbId) * IRO[132].m1))
+#define CSTORM_STATUS_BLOCK_SIZE (IRO[132].size)
+#define CSTORM_SYNC_BLOCK_OFFSET(sbId) \
+	(IRO[137].base + ((sbId) * IRO[137].m1))
+#define CSTORM_SYNC_BLOCK_SIZE (IRO[137].size)
+#define CSTORM_VF_PF_CHANNEL_STATE_OFFSET(vfId) \
+	(IRO[155].base + ((vfId) * IRO[155].m1))
+#define CSTORM_VF_PF_CHANNEL_VALID_OFFSET(vfId) \
+	(IRO[156].base + ((vfId) * IRO[156].m1))
+#define CSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[150].base + ((funcId) * IRO[150].m1))
+#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
+#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
+	(IRO[203].base + ((pfId) * IRO[203].m1))
+#define TSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[102].base)
+#define TSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[101].base + ((assertListEntry) * IRO[101].m1))
+#define TSTORM_COMMON_SAFC_WORKAROUND_ENABLE_OFFSET (IRO[107].base)
+#define TSTORM_COMMON_SAFC_WORKAROUND_TIMEOUT_10USEC_OFFSET \
+	(IRO[108].base)
+#define TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(pfId) \
+	(IRO[201].base + ((pfId) * IRO[201].m1))
+#define TSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[103].base + ((funcId) * IRO[103].m1))
+#define TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+	(IRO[271].base + ((pfId) * IRO[271].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CID_TABLE_OFFSET(pfId) \
+	(IRO[272].base + ((pfId) * IRO[272].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_CLIENT_ID_TABLE_OFFSET(pfId) \
+	(IRO[273].base + ((pfId) * IRO[273].m1))
+#define TSTORM_ISCSI_L2_ISCSI_OOO_PROD_OFFSET(pfId) \
+	(IRO[274].base + ((pfId) * IRO[274].m1))
+#define TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[270].base + ((pfId) * IRO[270].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[269].base + ((pfId) * IRO[269].m1))
+#define TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[268].base + ((pfId) * IRO[268].m1))
+#define TSTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[267].base + ((pfId) * IRO[267].m1))
+#define TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfId) \
+	(IRO[276].base + ((pfId) * IRO[276].m1))
+#define TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+	(IRO[263].base + ((pfId) * IRO[263].m1))
+#define TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[264].base + ((pfId) * IRO[264].m1))
+#define TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[265].base + ((pfId) * IRO[265].m1))
+#define TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfId) \
+	(IRO[266].base + ((pfId) * IRO[266].m1))
+#define TSTORM_MAC_FILTER_CONFIG_OFFSET(pfId) \
+	(IRO[202].base + ((pfId) * IRO[202].m1))
+#define TSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[105].base + ((funcId) * IRO[105].m1))
+#define TSTORM_TCP_MAX_CWND_OFFSET(pfId) \
+	(IRO[216].base + ((pfId) * IRO[216].m1))
+#define TSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[104].base + ((funcId) * IRO[104].m1))
+#define USTORM_AGG_DATA_OFFSET (IRO[206].base)
+#define USTORM_AGG_DATA_SIZE (IRO[206].size)
+#define USTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[177].base)
+#define USTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[176].base + ((assertListEntry) * IRO[176].m1))
+#define USTORM_CQE_PAGE_NEXT_OFFSET(portId, clientId) \
+	(IRO[205].base + ((portId) * IRO[205].m1) + ((clientId) * \
+	IRO[205].m2))
+#define USTORM_ETH_PAUSE_ENABLED_OFFSET(portId) \
+	(IRO[183].base + ((portId) * IRO[183].m1))
+#define USTORM_FCOE_EQ_PROD_OFFSET(pfId) \
+	(IRO[317].base + ((pfId) * IRO[317].m1))
+#define USTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[178].base + ((funcId) * IRO[178].m1))
+#define USTORM_ISCSI_CQ_SIZE_OFFSET(pfId) \
+	(IRO[281].base + ((pfId) * IRO[281].m1))
+#define USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfId) \
+	(IRO[282].base + ((pfId) * IRO[282].m1))
+#define USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfId) \
+	(IRO[286].base + ((pfId) * IRO[286].m1))
+#define USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfId) \
+	(IRO[283].base + ((pfId) * IRO[283].m1))
+#define USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[279].base + ((pfId) * IRO[279].m1))
+#define USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[278].base + ((pfId) * IRO[278].m1))
+#define USTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[277].base + ((pfId) * IRO[277].m1))
+#define USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[280].base + ((pfId) * IRO[280].m1))
+#define USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfId) \
+	(IRO[284].base + ((pfId) * IRO[284].m1))
+#define USTORM_ISCSI_RQ_SIZE_OFFSET(pfId) \
+	(IRO[285].base + ((pfId) * IRO[285].m1))
+#define USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(pfId) \
+	(IRO[182].base + ((pfId) * IRO[182].m1))
+#define USTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[180].base + ((funcId) * IRO[180].m1))
+#define USTORM_RX_PRODS_E1X_OFFSET(portId, clientId) \
+	(IRO[209].base + ((portId) * IRO[209].m1) + ((clientId) * \
+	IRO[209].m2))
+#define USTORM_RX_PRODS_E2_OFFSET(qzoneId) \
+	(IRO[210].base + ((qzoneId) * IRO[210].m1))
+#define USTORM_TPA_BTR_OFFSET (IRO[207].base)
+#define USTORM_TPA_BTR_SIZE (IRO[207].size)
+#define USTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[179].base + ((funcId) * IRO[179].m1))
+#define XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE (IRO[67].base)
+#define XSTORM_AGG_INT_FINAL_CLEANUP_INDEX (IRO[66].base)
+#define XSTORM_ASSERT_LIST_INDEX_OFFSET	(IRO[51].base)
+#define XSTORM_ASSERT_LIST_OFFSET(assertListEntry) \
+	(IRO[50].base + ((assertListEntry) * IRO[50].m1))
+#define XSTORM_CMNG_PER_PORT_VARS_OFFSET(portId) \
+	(IRO[43].base + ((portId) * IRO[43].m1))
+#define XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(pfId) \
+	(IRO[45].base + ((pfId) * IRO[45].m1))
+#define XSTORM_FUNC_EN_OFFSET(funcId) \
+	(IRO[47].base + ((funcId) * IRO[47].m1))
+#define XSTORM_ISCSI_HQ_SIZE_OFFSET(pfId) \
+	(IRO[294].base + ((pfId) * IRO[294].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfId) \
+	(IRO[297].base + ((pfId) * IRO[297].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfId) \
+	(IRO[298].base + ((pfId) * IRO[298].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfId) \
+	(IRO[299].base + ((pfId) * IRO[299].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfId) \
+	(IRO[300].base + ((pfId) * IRO[300].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfId) \
+	(IRO[301].base + ((pfId) * IRO[301].m1))
+#define XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfId) \
+	(IRO[302].base + ((pfId) * IRO[302].m1))
+#define XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfId) \
+	(IRO[303].base + ((pfId) * IRO[303].m1))
+#define XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfId) \
+	(IRO[293].base + ((pfId) * IRO[293].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfId) \
+	(IRO[292].base + ((pfId) * IRO[292].m1))
+#define XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfId) \
+	(IRO[291].base + ((pfId) * IRO[291].m1))
+#define XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfId) \
+	(IRO[296].base + ((pfId) * IRO[296].m1))
+#define XSTORM_ISCSI_SQ_SIZE_OFFSET(pfId) \
+	(IRO[295].base + ((pfId) * IRO[295].m1))
+#define XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfId) \
+	(IRO[290].base + ((pfId) * IRO[290].m1))
+#define XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(pfId) \
+	(IRO[289].base + ((pfId) * IRO[289].m1))
+#define XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfId) \
+	(IRO[288].base + ((pfId) * IRO[288].m1))
+#define XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfId) \
+	(IRO[287].base + ((pfId) * IRO[287].m1))
+#define XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(pfId) \
+	(IRO[44].base + ((pfId) * IRO[44].m1))
+#define XSTORM_RECORD_SLOW_PATH_OFFSET(funcId) \
+	(IRO[49].base + ((funcId) * IRO[49].m1))
+#define XSTORM_SPQ_DATA_OFFSET(funcId) \
+	(IRO[32].base + ((funcId) * IRO[32].m1))
+#define XSTORM_SPQ_DATA_SIZE (IRO[32].size)
+#define XSTORM_SPQ_PAGE_BASE_OFFSET(funcId) \
+	(IRO[30].base + ((funcId) * IRO[30].m1))
+#define XSTORM_SPQ_PROD_OFFSET(funcId) \
+	(IRO[31].base + ((funcId) * IRO[31].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(portId) \
+	(IRO[211].base + ((portId) * IRO[211].m1))
+#define XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(portId) \
+	(IRO[212].base + ((portId) * IRO[212].m1))
+#define XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfId) \
+	(IRO[214].base + (((pfId)>>1) * IRO[214].m1) + (((pfId)&1) * \
+	IRO[214].m2))
+#define XSTORM_VF_TO_PF_OFFSET(funcId) \
+	(IRO[48].base + ((funcId) * IRO[48].m1))
+#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
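+
+/*
+ * All of the *_OFFSET macros above resolve through the firmware-supplied
+ * IRO (relative offset) array: each entry holds a base offset into STORM
+ * RAM plus per-index strides, roughly (as defined elsewhere in the driver)
+ *
+ *	struct iro {
+ *		u32 base;
+ *		u16 m1;
+ *		u16 m2;
+ *		u16 m3;
+ *		u16 size;
+ *	};
+ *
+ * so that, e.g., CSTORM_FUNC_EN_OFFSET(2) evaluates to
+ * IRO[149].base + 2 * IRO[149].m1.
+ */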
+
+/**
+ * This file defines HSI constants for the ETH flow
+ */
+#ifdef _EVEREST_MICROCODE
+#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
+#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
+#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
+#endif
+
+
+/* Ethernet Ring parameters */
+#define X_ETH_LOCAL_RING_SIZE 13
+#define FIRST_BD_IN_PKT	0
+#define PARSE_BD_INDEX 1
+#define NUM_OF_ETH_BDS_IN_PAGE ((PAGE_SIZE)/(STRUCT_SIZE(eth_tx_bd)/8))
+#define U_ETH_NUM_OF_SGES_TO_FETCH 8
+#define U_ETH_MAX_SGES_FOR_PACKET 3
+
+/* Rx ring params */
+#define U_ETH_LOCAL_BD_RING_SIZE 8
+#define U_ETH_LOCAL_SGE_RING_SIZE 10
+#define U_ETH_SGL_SIZE 8
+	/* The FW will pad the buffer with this value, so the IP header
+	   will be aligned to a 4-byte boundary */
+#define IP_HEADER_ALIGNMENT_PADDING 2
+
+#define U_ETH_SGES_PER_PAGE_INVERSE_MASK \
+	(0xFFFF - ((PAGE_SIZE/((STRUCT_SIZE(eth_rx_sge))/8))-1))
+
+#define TU_ETH_CQES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_cqe)/8))
+#define U_ETH_BDS_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_bd)/8))
+#define U_ETH_SGES_PER_PAGE (PAGE_SIZE/(STRUCT_SIZE(eth_rx_sge)/8))
+
+#define U_ETH_BDS_PER_PAGE_MASK	(U_ETH_BDS_PER_PAGE-1)
+#define U_ETH_CQE_PER_PAGE_MASK	(TU_ETH_CQES_PER_PAGE-1)
+#define U_ETH_SGES_PER_PAGE_MASK (U_ETH_SGES_PER_PAGE-1)
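+
+/* STRUCT_SIZE() above is evidently a size in bits, hence the /8
+ * conversions; the per-page element counts come out as powers of two,
+ * which is what makes the (count - 1) masks above valid.
+ */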
+
+#define U_ETH_UNDEFINED_Q 0xFF
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY 10
+#define ETH_NUM_OF_RSS_ENGINES_E2 72
+
+#define FILTER_RULES_COUNT 16
+#define MULTICAST_RULES_COUNT 16
+#define CLASSIFY_RULES_COUNT 16
+
+/* The CRC32 seed used for the multicast address hash (reduction) */
+#define ETH_CRC32_HASH_SEED 0x00000000
+
+#define ETH_CRC32_HASH_BIT_SIZE	(8)
+#define ETH_CRC32_HASH_MASK EVAL((1<<ETH_CRC32_HASH_BIT_SIZE)-1)
+
+/* Maximal L2 clients supported */
+#define ETH_MAX_RX_CLIENTS_E1 18
+#define ETH_MAX_RX_CLIENTS_E1H 28
+#define ETH_MAX_RX_CLIENTS_E2 152
+
+/* Maximal statistics client Ids */
+#define MAX_STAT_COUNTER_ID_E1 36
+#define MAX_STAT_COUNTER_ID_E1H	56
+#define MAX_STAT_COUNTER_ID_E2 140
+
+#define MAX_MAC_CREDIT_E1 192 /* Per Chip */
+#define MAX_MAC_CREDIT_E1H 256 /* Per Chip */
+#define MAX_MAC_CREDIT_E2 272 /* Per Path */
+#define MAX_VLAN_CREDIT_E1 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E1H 0 /* Per Chip */
+#define MAX_VLAN_CREDIT_E2 272 /* Per Path */
+
+
+/* Maximal aggregation queues supported */
+#define ETH_MAX_AGGREGATION_QUEUES_E1 32
+#define ETH_MAX_AGGREGATION_QUEUES_E1H_E2 64
+
+
+#define ETH_NUM_OF_MCAST_BINS 256
+#define ETH_NUM_OF_MCAST_ENGINES_E2 72
+
+#define ETH_MIN_RX_CQES_WITHOUT_TPA (MAX_RAMRODS_PER_PORT + 3)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1 + ETH_MIN_RX_CQES_WITHOUT_TPA)
+#define ETH_MIN_RX_CQES_WITH_TPA_E1H_E2 \
+	(ETH_MAX_AGGREGATION_QUEUES_E1H_E2 + ETH_MIN_RX_CQES_WITHOUT_TPA)
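+/* e.g. with MAX_RAMRODS_PER_PORT = 8 (defined below), these minimums work
+ * out to 11 CQEs without TPA, 32 + 11 = 43 for E1 and 64 + 11 = 75 for
+ * E1H/E2.
+ */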
+
+#define DISABLE_STATISTIC_COUNTER_ID_VALUE 0
+
+
+/**
+ * This file defines HSI constants common to all microcode flows
+ */
+
+#define PROTOCOL_STATE_BIT_OFFSET 6
+
+#define ETH_STATE (ETH_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define TOE_STATE (TOE_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+#define RDMA_STATE (RDMA_CONNECTION_TYPE << PROTOCOL_STATE_BIT_OFFSET)
+
+/* microcode fixed page size of 4K (chains and ring segments) */
+#define MC_PAGE_SIZE 4096
+
+/* Number of indices per slow-path SB */
+#define HC_SP_SB_MAX_INDICES 16
+
+/* Number of indices per SB */
+#define HC_SB_MAX_INDICES_E1X 8
+#define HC_SB_MAX_INDICES_E2 8
+
+#define HC_SB_MAX_SB_E1X 32
+#define HC_SB_MAX_SB_E2	136
+
+#define HC_SP_SB_ID 0xde
+
+#define HC_SB_MAX_SM 2
+
+#define HC_SB_MAX_DYNAMIC_INDICES 4
+
+/* max number of slow path commands per port */
+#define MAX_RAMRODS_PER_PORT 8
+
+
+/**** DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define TIMERS_TICK_SIZE_CHIP (1e-3)
+
+#define TSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define XSEMI_CLK1_RESUL_CHIP (1e-3)
+
+#define SDM_TIMER_TICK_RESUL_CHIP (4 * (1e-6))
+
+/**** END DEFINES FOR TIMERS/CLOCKS RESOLUTIONS ****/
+
+#define XSTORM_IP_ID_ROLL_HALF 0x8000
+#define XSTORM_IP_ID_ROLL_ALL 0
+
+#define FW_LOG_LIST_SIZE 50
+
+#define NUM_OF_SAFC_BITS 16
+#define MAX_COS_NUMBER 4
+#define MAX_TRAFFIC_TYPES 8
+#define MAX_PFC_PRIORITIES 8
+
+	/* used by the traffic_type_to_priority[] array to mark a traffic
+	   type that is not mapped to any priority */
+#define LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED 0xFF
+
+
+#define C_ERES_PER_PAGE \
+	(PAGE_SIZE / BITS_TO_BYTES(STRUCT_SIZE(event_ring_elem)))
+#define C_ERE_PER_PAGE_MASK (C_ERES_PER_PAGE - 1)
+
+#define STATS_QUERY_CMD_COUNT 16
+
+#define NIV_LIST_TABLE_SIZE 4096
+
+#define INVALID_VNIC_ID	0xFF
+
+
+#define UNDEF_IRO 0x80000000
+
+
+#endif /* BNX2X_FW_DEFS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
new file mode 100644
index 0000000..f4a07fb
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -0,0 +1,38 @@
+/* bnx2x_fw_file_hdr.h: FW binary file header structure.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Based on the original idea of John Wright <john.wright@hp.com>.
+ */
+
+#ifndef BNX2X_INIT_FILE_HDR_H
+#define BNX2X_INIT_FILE_HDR_H
+
+struct bnx2x_fw_file_section {
+	__be32 len;
+	__be32 offset;
+};
+
+struct bnx2x_fw_file_hdr {
+	struct bnx2x_fw_file_section init_ops;
+	struct bnx2x_fw_file_section init_ops_offsets;
+	struct bnx2x_fw_file_section init_data;
+	struct bnx2x_fw_file_section tsem_int_table_data;
+	struct bnx2x_fw_file_section tsem_pram_data;
+	struct bnx2x_fw_file_section usem_int_table_data;
+	struct bnx2x_fw_file_section usem_pram_data;
+	struct bnx2x_fw_file_section csem_int_table_data;
+	struct bnx2x_fw_file_section csem_pram_data;
+	struct bnx2x_fw_file_section xsem_int_table_data;
+	struct bnx2x_fw_file_section xsem_pram_data;
+	struct bnx2x_fw_file_section iro_arr;
+	struct bnx2x_fw_file_section fw_version;
+};
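+
+/*
+ * This header sits at the very start of the bnx2x firmware blob; a
+ * consumer would typically locate a section along these lines
+ * (illustrative only, 'fw' being a struct firmware pointer):
+ *
+ *	const struct bnx2x_fw_file_hdr *hdr = (const void *)fw->data;
+ *	const u8 *sec = fw->data + be32_to_cpu(hdr->init_data.offset);
+ *	u32 sec_len = be32_to_cpu(hdr->init_data.len);
+ */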
+
+#endif /* BNX2X_INIT_FILE_HDR_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
new file mode 100644
index 0000000..dc24de4
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -0,0 +1,5131 @@
+/* bnx2x_hsi.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+#ifndef BNX2X_HSI_H
+#define BNX2X_HSI_H
+
+#include "bnx2x_fw_defs.h"
+
+#define FW_ENCODE_32BIT_PATTERN         0x1e1e1e1e
+
+struct license_key {
+	u32 reserved[6];
+
+	u32 max_iscsi_conn;
+#define BNX2X_MAX_ISCSI_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_ISCSI_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_ISCSI_INIT_CONN_SHIFT	16
+
+	u32 reserved_a;
+
+	u32 max_fcoe_conn;
+#define BNX2X_MAX_FCOE_TRGT_CONN_MASK	0xFFFF
+#define BNX2X_MAX_FCOE_TRGT_CONN_SHIFT	0
+#define BNX2X_MAX_FCOE_INIT_CONN_MASK	0xFFFF0000
+#define BNX2X_MAX_FCOE_INIT_CONN_SHIFT	16
+
+	u32 reserved_b[4];
+};
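+
+/* The MASK/SHIFT pairs throughout this file are meant to be used the
+ * usual way, e.g. (illustrative, 'key' being a struct license_key
+ * pointer):
+ *
+ *	u16 trgt = (key->max_iscsi_conn & BNX2X_MAX_ISCSI_TRGT_CONN_MASK) >>
+ *		   BNX2X_MAX_ISCSI_TRGT_CONN_SHIFT;
+ */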
+
+
+#define PORT_0              0
+#define PORT_1              1
+#define PORT_MAX            2
+
+/****************************************************************************
+ * Shared HW configuration                                                  *
+ ****************************************************************************/
+#define PIN_CFG_NA                          0x00000000
+#define PIN_CFG_GPIO0_P0                    0x00000001
+#define PIN_CFG_GPIO1_P0                    0x00000002
+#define PIN_CFG_GPIO2_P0                    0x00000003
+#define PIN_CFG_GPIO3_P0                    0x00000004
+#define PIN_CFG_GPIO0_P1                    0x00000005
+#define PIN_CFG_GPIO1_P1                    0x00000006
+#define PIN_CFG_GPIO2_P1                    0x00000007
+#define PIN_CFG_GPIO3_P1                    0x00000008
+#define PIN_CFG_EPIO0                       0x00000009
+#define PIN_CFG_EPIO1                       0x0000000a
+#define PIN_CFG_EPIO2                       0x0000000b
+#define PIN_CFG_EPIO3                       0x0000000c
+#define PIN_CFG_EPIO4                       0x0000000d
+#define PIN_CFG_EPIO5                       0x0000000e
+#define PIN_CFG_EPIO6                       0x0000000f
+#define PIN_CFG_EPIO7                       0x00000010
+#define PIN_CFG_EPIO8                       0x00000011
+#define PIN_CFG_EPIO9                       0x00000012
+#define PIN_CFG_EPIO10                      0x00000013
+#define PIN_CFG_EPIO11                      0x00000014
+#define PIN_CFG_EPIO12                      0x00000015
+#define PIN_CFG_EPIO13                      0x00000016
+#define PIN_CFG_EPIO14                      0x00000017
+#define PIN_CFG_EPIO15                      0x00000018
+#define PIN_CFG_EPIO16                      0x00000019
+#define PIN_CFG_EPIO17                      0x0000001a
+#define PIN_CFG_EPIO18                      0x0000001b
+#define PIN_CFG_EPIO19                      0x0000001c
+#define PIN_CFG_EPIO20                      0x0000001d
+#define PIN_CFG_EPIO21                      0x0000001e
+#define PIN_CFG_EPIO22                      0x0000001f
+#define PIN_CFG_EPIO23                      0x00000020
+#define PIN_CFG_EPIO24                      0x00000021
+#define PIN_CFG_EPIO25                      0x00000022
+#define PIN_CFG_EPIO26                      0x00000023
+#define PIN_CFG_EPIO27                      0x00000024
+#define PIN_CFG_EPIO28                      0x00000025
+#define PIN_CFG_EPIO29                      0x00000026
+#define PIN_CFG_EPIO30                      0x00000027
+#define PIN_CFG_EPIO31                      0x00000028
+
+/* EPIO definition */
+#define EPIO_CFG_NA                         0x00000000
+#define EPIO_CFG_EPIO0                      0x00000001
+#define EPIO_CFG_EPIO1                      0x00000002
+#define EPIO_CFG_EPIO2                      0x00000003
+#define EPIO_CFG_EPIO3                      0x00000004
+#define EPIO_CFG_EPIO4                      0x00000005
+#define EPIO_CFG_EPIO5                      0x00000006
+#define EPIO_CFG_EPIO6                      0x00000007
+#define EPIO_CFG_EPIO7                      0x00000008
+#define EPIO_CFG_EPIO8                      0x00000009
+#define EPIO_CFG_EPIO9                      0x0000000a
+#define EPIO_CFG_EPIO10                     0x0000000b
+#define EPIO_CFG_EPIO11                     0x0000000c
+#define EPIO_CFG_EPIO12                     0x0000000d
+#define EPIO_CFG_EPIO13                     0x0000000e
+#define EPIO_CFG_EPIO14                     0x0000000f
+#define EPIO_CFG_EPIO15                     0x00000010
+#define EPIO_CFG_EPIO16                     0x00000011
+#define EPIO_CFG_EPIO17                     0x00000012
+#define EPIO_CFG_EPIO18                     0x00000013
+#define EPIO_CFG_EPIO19                     0x00000014
+#define EPIO_CFG_EPIO20                     0x00000015
+#define EPIO_CFG_EPIO21                     0x00000016
+#define EPIO_CFG_EPIO22                     0x00000017
+#define EPIO_CFG_EPIO23                     0x00000018
+#define EPIO_CFG_EPIO24                     0x00000019
+#define EPIO_CFG_EPIO25                     0x0000001a
+#define EPIO_CFG_EPIO26                     0x0000001b
+#define EPIO_CFG_EPIO27                     0x0000001c
+#define EPIO_CFG_EPIO28                     0x0000001d
+#define EPIO_CFG_EPIO29                     0x0000001e
+#define EPIO_CFG_EPIO30                     0x0000001f
+#define EPIO_CFG_EPIO31                     0x00000020
+
+
+struct shared_hw_cfg {			 /* NVRAM Offset */
+	/* Up to 16 bytes of NULL-terminated string */
+	u8  part_num[16];		    /* 0x104 */
+
+	u32 config;			/* 0x114 */
+	#define SHARED_HW_CFG_MDIO_VOLTAGE_MASK             0x00000001
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_SHIFT             0
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_1_2V              0x00000000
+		#define SHARED_HW_CFG_MDIO_VOLTAGE_2_5V              0x00000001
+	#define SHARED_HW_CFG_MCP_RST_ON_CORE_RST_EN        0x00000002
+
+	#define SHARED_HW_CFG_PORT_SWAP                     0x00000004
+
+	#define SHARED_HW_CFG_BEACON_WOL_EN                 0x00000008
+
+	#define SHARED_HW_CFG_PCIE_GEN3_DISABLED            0x00000000
+	#define SHARED_HW_CFG_PCIE_GEN3_ENABLED             0x00000010
+
+	#define SHARED_HW_CFG_MFW_SELECT_MASK               0x00000700
+		#define SHARED_HW_CFG_MFW_SELECT_SHIFT               8
+	/* Whichever MFW is found in NVM
+	   (if multiple are found, the priority order is: NC-SI, UMP, IPMI) */
+		#define SHARED_HW_CFG_MFW_SELECT_DEFAULT             0x00000000
+		#define SHARED_HW_CFG_MFW_SELECT_NC_SI               0x00000100
+		#define SHARED_HW_CFG_MFW_SELECT_UMP                 0x00000200
+		#define SHARED_HW_CFG_MFW_SELECT_IPMI                0x00000300
+	/* Use SPIO4 as an arbiter between: 0-NC_SI, 1-IPMI
+	  (can only be used when an add-in board, not BMC, pulls down SPIO4) */
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_IPMI    0x00000400
+	/* Use SPIO4 as an arbiter between: 0-UMP, 1-IPMI
+	  (can only be used when an add-in board, not BMC, pulls down SPIO4) */
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_UMP_IPMI      0x00000500
+	/* Use SPIO4 as an arbiter between: 0-NC-SI, 1-UMP
+	  (can only be used when an add-in board, not BMC, pulls down SPIO4) */
+		#define SHARED_HW_CFG_MFW_SELECT_SPIO4_NC_SI_UMP     0x00000600
+
+	#define SHARED_HW_CFG_LED_MODE_MASK                 0x000f0000
+		#define SHARED_HW_CFG_LED_MODE_SHIFT                 16
+		#define SHARED_HW_CFG_LED_MAC1                       0x00000000
+		#define SHARED_HW_CFG_LED_PHY1                       0x00010000
+		#define SHARED_HW_CFG_LED_PHY2                       0x00020000
+		#define SHARED_HW_CFG_LED_PHY3                       0x00030000
+		#define SHARED_HW_CFG_LED_MAC2                       0x00040000
+		#define SHARED_HW_CFG_LED_PHY4                       0x00050000
+		#define SHARED_HW_CFG_LED_PHY5                       0x00060000
+		#define SHARED_HW_CFG_LED_PHY6                       0x00070000
+		#define SHARED_HW_CFG_LED_MAC3                       0x00080000
+		#define SHARED_HW_CFG_LED_PHY7                       0x00090000
+		#define SHARED_HW_CFG_LED_PHY9                       0x000a0000
+		#define SHARED_HW_CFG_LED_PHY11                      0x000b0000
+		#define SHARED_HW_CFG_LED_MAC4                       0x000c0000
+		#define SHARED_HW_CFG_LED_PHY8                       0x000d0000
+		#define SHARED_HW_CFG_LED_EXTPHY1                    0x000e0000
+
+
+	#define SHARED_HW_CFG_AN_ENABLE_MASK                0x3f000000
+		#define SHARED_HW_CFG_AN_ENABLE_SHIFT                24
+		#define SHARED_HW_CFG_AN_ENABLE_CL37                 0x01000000
+		#define SHARED_HW_CFG_AN_ENABLE_CL73                 0x02000000
+		#define SHARED_HW_CFG_AN_ENABLE_BAM                  0x04000000
+		#define SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION   0x08000000
+		#define SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT  0x10000000
+		#define SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY           0x20000000
+
+	#define SHARED_HW_CFG_SRIOV_MASK                    0x40000000
+		#define SHARED_HW_CFG_SRIOV_DISABLED                 0x00000000
+		#define SHARED_HW_CFG_SRIOV_ENABLED                  0x40000000
+
+	#define SHARED_HW_CFG_ATC_MASK                      0x80000000
+		#define SHARED_HW_CFG_ATC_DISABLED                   0x00000000
+		#define SHARED_HW_CFG_ATC_ENABLED                    0x80000000
+
+	u32 config2;			    /* 0x118 */
+	/* one time auto detect grace period (in sec) */
+	#define SHARED_HW_CFG_GRACE_PERIOD_MASK             0x000000ff
+	#define SHARED_HW_CFG_GRACE_PERIOD_SHIFT                     0
+
+	#define SHARED_HW_CFG_PCIE_GEN2_ENABLED             0x00000100
+	#define SHARED_HW_CFG_PCIE_GEN2_DISABLED            0x00000000
+
+	/* The default value for the core clock is 250MHz and it is
+	   achieved by setting the clock change to 4 */
+	#define SHARED_HW_CFG_CLOCK_CHANGE_MASK             0x00000e00
+	#define SHARED_HW_CFG_CLOCK_CHANGE_SHIFT                     9
+
+	#define SHARED_HW_CFG_SMBUS_TIMING_MASK             0x00001000
+		#define SHARED_HW_CFG_SMBUS_TIMING_100KHZ            0x00000000
+		#define SHARED_HW_CFG_SMBUS_TIMING_400KHZ            0x00001000
+
+	#define SHARED_HW_CFG_HIDE_PORT1                    0x00002000
+
+	#define SHARED_HW_CFG_WOL_CAPABLE_MASK              0x00004000
+		#define SHARED_HW_CFG_WOL_CAPABLE_DISABLED           0x00000000
+		#define SHARED_HW_CFG_WOL_CAPABLE_ENABLED            0x00004000
+
+		/* Output low when PERST is asserted */
+	#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_MASK       0x00008000
+		#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_DISABLED    0x00000000
+		#define SHARED_HW_CFG_SPIO4_FOLLOW_PERST_ENABLED     0x00008000
+
+	#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_MASK    0x00070000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_SHIFT    16
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_HW       0x00000000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_0DB      0x00010000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_3_5DB    0x00020000
+		#define SHARED_HW_CFG_PCIE_GEN2_PREEMPHASIS_6_0DB    0x00030000
+
+	/*  The fan failure mechanism is usually related to the PHY type
+	      since the power consumption of the board is determined by the PHY.
+	      Currently, a fan is required for most designs with SFX7101, BCM8727
+	      and BCM8481. If a fan is not required for a board which uses one
+	      of those PHYs, this field should be set to "Disabled". If a fan is
+	      required for a different PHY type, this option should be set to
+	      "Enabled". The fan failure indication is expected on SPIO5 */
+	#define SHARED_HW_CFG_FAN_FAILURE_MASK              0x00180000
+		#define SHARED_HW_CFG_FAN_FAILURE_SHIFT              19
+		#define SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE           0x00000000
+		#define SHARED_HW_CFG_FAN_FAILURE_DISABLED           0x00080000
+		#define SHARED_HW_CFG_FAN_FAILURE_ENABLED            0x00100000
+
+		/* ASPM Power Management support */
+	#define SHARED_HW_CFG_ASPM_SUPPORT_MASK             0x00600000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_SHIFT             21
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_ENABLED    0x00000000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_DISABLED      0x00200000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L1_DISABLED       0x00400000
+		#define SHARED_HW_CFG_ASPM_SUPPORT_L0S_L1_DISABLED   0x00600000
+
+	/* The value of PM_TL_IGNORE_REQS (bit0) in PCI register
+	   tl_control_0 (register 0x2800) */
+	#define SHARED_HW_CFG_PREVENT_L1_ENTRY_MASK         0x00800000
+		#define SHARED_HW_CFG_PREVENT_L1_ENTRY_DISABLED      0x00000000
+		#define SHARED_HW_CFG_PREVENT_L1_ENTRY_ENABLED       0x00800000
+
+	#define SHARED_HW_CFG_PORT_MODE_MASK                0x01000000
+		#define SHARED_HW_CFG_PORT_MODE_2                    0x00000000
+		#define SHARED_HW_CFG_PORT_MODE_4                    0x01000000
+
+	#define SHARED_HW_CFG_PATH_SWAP_MASK                0x02000000
+		#define SHARED_HW_CFG_PATH_SWAP_DISABLED             0x00000000
+		#define SHARED_HW_CFG_PATH_SWAP_ENABLED              0x02000000
+
+	/*  Set the MDC/MDIO access for the first external phy */
+	#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK         0x1C000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT         26
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE      0x00000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0         0x04000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1         0x08000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH          0x0c000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED       0x10000000
+
+	/*  Set the MDC/MDIO access for the second external phy */
+	#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK         0xE0000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT         29
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_PHY_TYPE      0x00000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC0         0x20000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_EMAC1         0x40000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_BOTH          0x60000000
+		#define SHARED_HW_CFG_MDC_MDIO_ACCESS2_SWAPPED       0x80000000
+
+
+	u32 power_dissipated;			/* 0x11c */
+	#define SHARED_HW_CFG_POWER_MGNT_SCALE_MASK         0x00ff0000
+		#define SHARED_HW_CFG_POWER_MGNT_SCALE_SHIFT         16
+		#define SHARED_HW_CFG_POWER_MGNT_UNKNOWN_SCALE       0x00000000
+		#define SHARED_HW_CFG_POWER_MGNT_DOT_1_WATT          0x00010000
+		#define SHARED_HW_CFG_POWER_MGNT_DOT_01_WATT         0x00020000
+		#define SHARED_HW_CFG_POWER_MGNT_DOT_001_WATT        0x00030000
+
+	#define SHARED_HW_CFG_POWER_DIS_CMN_MASK            0xff000000
+	#define SHARED_HW_CFG_POWER_DIS_CMN_SHIFT                    24
+
+	u32 ump_nc_si_config;			/* 0x120 */
+	#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MASK       0x00000003
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_SHIFT       0
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MAC         0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_PHY         0x00000001
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_MII         0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_MII_MODE_RMII        0x00000002
+
+	#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_MASK       0x00000f00
+		#define SHARED_HW_CFG_UMP_NC_SI_NUM_DEVS_SHIFT       8
+
+	#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_MASK   0x00ff0000
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_SHIFT   16
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_NONE    0x00000000
+		#define SHARED_HW_CFG_UMP_NC_SI_EXT_PHY_TYPE_BCM5221 0x00010000
+
+	u32 board;			/* 0x124 */
+	#define SHARED_HW_CFG_E3_I2C_MUX0_MASK              0x0000003F
+	#define SHARED_HW_CFG_E3_I2C_MUX0_SHIFT                      0
+	#define SHARED_HW_CFG_E3_I2C_MUX1_MASK              0x00000FC0
+	#define SHARED_HW_CFG_E3_I2C_MUX1_SHIFT                      6
+	/* Use the PIN_CFG_XXX defines on top */
+	#define SHARED_HW_CFG_BOARD_REV_MASK                0x00ff0000
+	#define SHARED_HW_CFG_BOARD_REV_SHIFT                        16
+
+	#define SHARED_HW_CFG_BOARD_MAJOR_VER_MASK          0x0f000000
+	#define SHARED_HW_CFG_BOARD_MAJOR_VER_SHIFT                  24
+
+	#define SHARED_HW_CFG_BOARD_MINOR_VER_MASK          0xf0000000
+	#define SHARED_HW_CFG_BOARD_MINOR_VER_SHIFT                  28
+
+	u32 wc_lane_config;				    /* 0x128 */
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_MASK            0x0000FFFF
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_SHIFT            0
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_32103210         0x00001b1b
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_32100123         0x00001be4
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_01233210         0x0000e41b
+		#define SHARED_HW_CFG_LANE_SWAP_CFG_01230123         0x0000e4e4
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_TX_MASK         0x000000FF
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_TX_SHIFT                 0
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_RX_MASK         0x0000FF00
+	#define SHARED_HW_CFG_LANE_SWAP_CFG_RX_SHIFT                 8
+
+	/* TX lane Polarity swap */
+	#define SHARED_HW_CFG_TX_LANE0_POL_FLIP_ENABLED     0x00010000
+	#define SHARED_HW_CFG_TX_LANE1_POL_FLIP_ENABLED     0x00020000
+	#define SHARED_HW_CFG_TX_LANE2_POL_FLIP_ENABLED     0x00040000
+	#define SHARED_HW_CFG_TX_LANE3_POL_FLIP_ENABLED     0x00080000
+	/* RX lane Polarity swap */
+	#define SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED     0x00100000
+	#define SHARED_HW_CFG_RX_LANE1_POL_FLIP_ENABLED     0x00200000
+	#define SHARED_HW_CFG_RX_LANE2_POL_FLIP_ENABLED     0x00400000
+	#define SHARED_HW_CFG_RX_LANE3_POL_FLIP_ENABLED     0x00800000
+
+	/*  Selects the port layout of the board */
+	#define SHARED_HW_CFG_E3_PORT_LAYOUT_MASK           0x0F000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_SHIFT           24
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_01           0x00000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_2P_10           0x01000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_0123         0x02000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_1032         0x03000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_2301         0x04000000
+		#define SHARED_HW_CFG_E3_PORT_LAYOUT_4P_3210         0x05000000
+};
+
+
+/****************************************************************************
+ * Port HW configuration                                                    *
+ ****************************************************************************/
+struct port_hw_cfg {		    /* port 0: 0x12c  port 1: 0x2bc */
+
+	u32 pci_id;
+	#define PORT_HW_CFG_PCI_VENDOR_ID_MASK              0xffff0000
+	#define PORT_HW_CFG_PCI_DEVICE_ID_MASK              0x0000ffff
+
+	u32 pci_sub_id;
+	#define PORT_HW_CFG_PCI_SUBSYS_DEVICE_ID_MASK       0xffff0000
+	#define PORT_HW_CFG_PCI_SUBSYS_VENDOR_ID_MASK       0x0000ffff
+
+	u32 power_dissipated;
+	#define PORT_HW_CFG_POWER_DIS_D0_MASK               0x000000ff
+	#define PORT_HW_CFG_POWER_DIS_D0_SHIFT                       0
+	#define PORT_HW_CFG_POWER_DIS_D1_MASK               0x0000ff00
+	#define PORT_HW_CFG_POWER_DIS_D1_SHIFT                       8
+	#define PORT_HW_CFG_POWER_DIS_D2_MASK               0x00ff0000
+	#define PORT_HW_CFG_POWER_DIS_D2_SHIFT                       16
+	#define PORT_HW_CFG_POWER_DIS_D3_MASK               0xff000000
+	#define PORT_HW_CFG_POWER_DIS_D3_SHIFT                       24
+
+	u32 power_consumed;
+	#define PORT_HW_CFG_POWER_CONS_D0_MASK              0x000000ff
+	#define PORT_HW_CFG_POWER_CONS_D0_SHIFT                      0
+	#define PORT_HW_CFG_POWER_CONS_D1_MASK              0x0000ff00
+	#define PORT_HW_CFG_POWER_CONS_D1_SHIFT                      8
+	#define PORT_HW_CFG_POWER_CONS_D2_MASK              0x00ff0000
+	#define PORT_HW_CFG_POWER_CONS_D2_SHIFT                      16
+	#define PORT_HW_CFG_POWER_CONS_D3_MASK              0xff000000
+	#define PORT_HW_CFG_POWER_CONS_D3_SHIFT                      24
+
+	u32 mac_upper;
+	#define PORT_HW_CFG_UPPERMAC_MASK                   0x0000ffff
+	#define PORT_HW_CFG_UPPERMAC_SHIFT                           0
+	u32 mac_lower;
+
+	u32 iscsi_mac_upper;  /* Upper 16 bits are always zeroes */
+	u32 iscsi_mac_lower;
+
+	u32 rdma_mac_upper;   /* Upper 16 bits are always zeroes */
+	u32 rdma_mac_lower;
+
+	u32 serdes_config;
+	#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_MASK 0x0000ffff
+	#define PORT_HW_CFG_SERDES_TX_DRV_PRE_EMPHASIS_SHIFT         0
+
+	#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK    0xffff0000
+	#define PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT            16
+
+
+	/*  Default values: 2P-64, 4P-32 */
+	u32 pf_config;					    /* 0x158 */
+	#define PORT_HW_CFG_PF_NUM_VF_MASK                  0x0000007F
+	#define PORT_HW_CFG_PF_NUM_VF_SHIFT                          0
+
+	/*  Default values: 17 */
+	#define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_MASK        0x00007F00
+	#define PORT_HW_CFG_PF_NUM_MSIX_VECTORS_SHIFT                8
+
+	#define PORT_HW_CFG_ENABLE_FLR_MASK                 0x00010000
+	#define PORT_HW_CFG_FLR_ENABLED                     0x00010000
+
+	u32 vf_config;					    /* 0x15C */
+	#define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_MASK        0x0000007F
+	#define PORT_HW_CFG_VF_NUM_MSIX_VECTORS_SHIFT                0
+
+	#define PORT_HW_CFG_VF_PCI_DEVICE_ID_MASK           0xFFFF0000
+	#define PORT_HW_CFG_VF_PCI_DEVICE_ID_SHIFT                   16
+
+	u32 mf_pci_id;					    /* 0x160 */
+	#define PORT_HW_CFG_MF_PCI_DEVICE_ID_MASK           0x0000FFFF
+	#define PORT_HW_CFG_MF_PCI_DEVICE_ID_SHIFT                   0
+
+	/*  Controls the TX laser of the SFP+ module */
+	u32 sfp_ctrl;					    /* 0x164 */
+	#define PORT_HW_CFG_TX_LASER_MASK                   0x000000FF
+		#define PORT_HW_CFG_TX_LASER_SHIFT                   0
+		#define PORT_HW_CFG_TX_LASER_MDIO                    0x00000000
+		#define PORT_HW_CFG_TX_LASER_GPIO0                   0x00000001
+		#define PORT_HW_CFG_TX_LASER_GPIO1                   0x00000002
+		#define PORT_HW_CFG_TX_LASER_GPIO2                   0x00000003
+		#define PORT_HW_CFG_TX_LASER_GPIO3                   0x00000004
+
+	/*  Controls the fault module LED of the SFP+ */
+	#define PORT_HW_CFG_FAULT_MODULE_LED_MASK           0x0000FF00
+		#define PORT_HW_CFG_FAULT_MODULE_LED_SHIFT           8
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO0           0x00000000
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO1           0x00000100
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO2           0x00000200
+		#define PORT_HW_CFG_FAULT_MODULE_LED_GPIO3           0x00000300
+		#define PORT_HW_CFG_FAULT_MODULE_LED_DISABLED        0x00000400
+
+	/*  The output pin TX_DIS that controls the TX laser of the SFP+
+	  module. Use the PIN_CFG_XXX defines on top */
+	u32 e3_sfp_ctrl;				    /* 0x168 */
+	#define PORT_HW_CFG_E3_TX_LASER_MASK                0x000000FF
+	#define PORT_HW_CFG_E3_TX_LASER_SHIFT                        0
+
+	/*  The output pin for SFPP_TYPE which turns on the Fault module LED */
+	#define PORT_HW_CFG_E3_FAULT_MDL_LED_MASK           0x0000FF00
+	#define PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT                   8
+
+	/*  The input pin MOD_ABS that indicates whether SFP+ module is
+	  present or not. Use the PIN_CFG_XXX defines on top */
+	#define PORT_HW_CFG_E3_MOD_ABS_MASK                 0x00FF0000
+	#define PORT_HW_CFG_E3_MOD_ABS_SHIFT                         16
+
+	/*  The output pin PWRDIS_SFP_X which disables the power of the SFP+
+	  module. Use the PIN_CFG_XXX defines on top */
+	#define PORT_HW_CFG_E3_PWR_DIS_MASK                 0xFF000000
+	#define PORT_HW_CFG_E3_PWR_DIS_SHIFT                         24
+
+	/*
+	 * The input pin which signals module transmit fault. Use the
+	 * PIN_CFG_XXX defines on top
+	 */
+	u32 e3_cmn_pin_cfg;				    /* 0x16C */
+	#define PORT_HW_CFG_E3_TX_FAULT_MASK                0x000000FF
+	#define PORT_HW_CFG_E3_TX_FAULT_SHIFT                        0
+
+	/*  The output pin which resets the PHY. Use the PIN_CFG_XXX defines on
+	 top */
+	#define PORT_HW_CFG_E3_PHY_RESET_MASK               0x0000FF00
+	#define PORT_HW_CFG_E3_PHY_RESET_SHIFT                       8
+
+	/*
+	 * The output pin which powers down the PHY. Use the PIN_CFG_XXX
+	 * defines on top
+	 */
+	#define PORT_HW_CFG_E3_PWR_DOWN_MASK                0x00FF0000
+	#define PORT_HW_CFG_E3_PWR_DOWN_SHIFT                        16
+
+	/*  The output pin values BSC_SEL which select the I2C bus for this
+	  port in the I2C mux */
+	#define PORT_HW_CFG_E3_I2C_MUX0_MASK                0x01000000
+	#define PORT_HW_CFG_E3_I2C_MUX1_MASK                0x02000000
+
+
+	/*
+	 * The input pin I_FAULT which indicates that an over-current
+	 * condition has occurred. Use the PIN_CFG_XXX defines on top
+	 */
+	u32 e3_cmn_pin_cfg1;				    /* 0x170 */
+	#define PORT_HW_CFG_E3_OVER_CURRENT_MASK            0x000000FF
+	#define PORT_HW_CFG_E3_OVER_CURRENT_SHIFT                    0
+	u32 reserved0[7];				    /* 0x174 */
+
+	u32 aeu_int_mask;				    /* 0x190 */
+
+	u32 media_type;					    /* 0x194 */
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK            0x000000FF
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT                    0
+
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK            0x0000FF00
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT                    8
+
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK            0x00FF0000
+	#define PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT                    16
+
+	/*  4 times 16 bits for all 4 lanes. In case an external PHY is
+	      present (not direct mode), those values will not take effect on
+	      the 4 XGXS lanes. For some external PHYs (such as the 8706 and
+	      8726) the values will be used to configure the external PHY;
+	      in those cases, not all 4 values are needed. */
+	u16 xgxs_config_rx[4];			/* 0x198 */
+	u16 xgxs_config_tx[4];			/* 0x1A0 */
+
+	/* For storing FCOE mac on shared memory */
+	u32 fcoe_fip_mac_upper;
+	#define PORT_HW_CFG_FCOE_UPPERMAC_MASK              0x0000ffff
+	#define PORT_HW_CFG_FCOE_UPPERMAC_SHIFT                      0
+	u32 fcoe_fip_mac_lower;
+
+	u32 fcoe_wwn_port_name_upper;
+	u32 fcoe_wwn_port_name_lower;
+
+	u32 fcoe_wwn_node_name_upper;
+	u32 fcoe_wwn_node_name_lower;
+
+	u32 Reserved1[49];				    /* 0x1C0 */
+
+	/*  Enable RJ45 magjack pair swapping on 10GBase-T PHY (0=default),
+	      84833 only */
+	u32 xgbt_phy_cfg;				    /* 0x284 */
+	#define PORT_HW_CFG_RJ45_PAIR_SWAP_MASK             0x000000FF
+	#define PORT_HW_CFG_RJ45_PAIR_SWAP_SHIFT                     0
+
+	u32 default_cfg;				    /* 0x288 */
+	#define PORT_HW_CFG_GPIO0_CONFIG_MASK               0x00000003
+		#define PORT_HW_CFG_GPIO0_CONFIG_SHIFT               0
+		#define PORT_HW_CFG_GPIO0_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO0_CONFIG_LOW                 0x00000001
+		#define PORT_HW_CFG_GPIO0_CONFIG_HIGH                0x00000002
+		#define PORT_HW_CFG_GPIO0_CONFIG_INPUT               0x00000003
+
+	#define PORT_HW_CFG_GPIO1_CONFIG_MASK               0x0000000C
+		#define PORT_HW_CFG_GPIO1_CONFIG_SHIFT               2
+		#define PORT_HW_CFG_GPIO1_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO1_CONFIG_LOW                 0x00000004
+		#define PORT_HW_CFG_GPIO1_CONFIG_HIGH                0x00000008
+		#define PORT_HW_CFG_GPIO1_CONFIG_INPUT               0x0000000c
+
+	#define PORT_HW_CFG_GPIO2_CONFIG_MASK               0x00000030
+		#define PORT_HW_CFG_GPIO2_CONFIG_SHIFT               4
+		#define PORT_HW_CFG_GPIO2_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO2_CONFIG_LOW                 0x00000010
+		#define PORT_HW_CFG_GPIO2_CONFIG_HIGH                0x00000020
+		#define PORT_HW_CFG_GPIO2_CONFIG_INPUT               0x00000030
+
+	#define PORT_HW_CFG_GPIO3_CONFIG_MASK               0x000000C0
+		#define PORT_HW_CFG_GPIO3_CONFIG_SHIFT               6
+		#define PORT_HW_CFG_GPIO3_CONFIG_NA                  0x00000000
+		#define PORT_HW_CFG_GPIO3_CONFIG_LOW                 0x00000040
+		#define PORT_HW_CFG_GPIO3_CONFIG_HIGH                0x00000080
+		#define PORT_HW_CFG_GPIO3_CONFIG_INPUT               0x000000c0
+
+	/*  When the KR link is required to be forced to a speed which is not
+	      KR-compliant, this parameter determines the trigger for it. When
+	      GPIO is selected, a low input will force the speed. Currently the
+	      default speed is 1G. In the future, this may be widened to select
+	      the forced speed with another parameter. Note that when force-1G
+	      is enabled, it overrides option 56: Link Speed option. */
+	#define PORT_HW_CFG_FORCE_KR_ENABLER_MASK           0x00000F00
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_SHIFT           8
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_NOT_FORCED      0x00000000
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P0        0x00000100
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P0        0x00000200
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P0        0x00000300
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P0        0x00000400
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO0_P1        0x00000500
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO1_P1        0x00000600
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO2_P1        0x00000700
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_GPIO3_P1        0x00000800
+		#define PORT_HW_CFG_FORCE_KR_ENABLER_FORCED          0x00000900
+	/*  Determines which GPIO is used to reset the external PHY */
+	#define PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK           0x000F0000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT           16
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_PHY_TYPE        0x00000000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0        0x00010000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0        0x00020000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0        0x00030000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0        0x00040000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1        0x00050000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1        0x00060000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1        0x00070000
+		#define PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1        0x00080000
+
+	/*  Enable BAM on KR */
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK           0x00100000
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT                   20
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED                0x00000000
+	#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED                 0x00100000
+
+	/*  Enable Common Mode Sense */
+	#define PORT_HW_CFG_ENABLE_CMS_MASK                 0x00200000
+	#define PORT_HW_CFG_ENABLE_CMS_SHIFT                         21
+	#define PORT_HW_CFG_ENABLE_CMS_DISABLED                      0x00000000
+	#define PORT_HW_CFG_ENABLE_CMS_ENABLED                       0x00200000
+
+	/*  Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
+	#define PORT_HW_CFG_RJ45_PR_SWP_MASK                0x00400000
+	#define PORT_HW_CFG_RJ45_PR_SWP_SHIFT			     22
+	#define PORT_HW_CFG_RJ45_PR_SWP_DISABLED		     0x00000000
+	#define PORT_HW_CFG_RJ45_PR_SWP_ENABLED			     0x00400000
+
+	/*  Determines the Serdes electrical interface */
+	#define PORT_HW_CFG_NET_SERDES_IF_MASK              0x0F000000
+	#define PORT_HW_CFG_NET_SERDES_IF_SHIFT                      24
+	#define PORT_HW_CFG_NET_SERDES_IF_SGMII                      0x00000000
+	#define PORT_HW_CFG_NET_SERDES_IF_XFI                        0x01000000
+	#define PORT_HW_CFG_NET_SERDES_IF_SFI                        0x02000000
+	#define PORT_HW_CFG_NET_SERDES_IF_KR                         0x03000000
+	#define PORT_HW_CFG_NET_SERDES_IF_DXGXS                      0x04000000
+	#define PORT_HW_CFG_NET_SERDES_IF_KR2                        0x05000000
+
+
+	u32 speed_capability_mask2;			    /* 0x28C */
+	#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK       0x0000FFFF
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT       0
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10M_FULL    0x00000001
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3__           0x00000002
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3___          0x00000004
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_100M_FULL   0x00000008
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_1G          0x00000010
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_2_DOT_5G    0x00000020
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_10G         0x00000040
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D3_20G         0x00000080
+
+	#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_MASK       0xFFFF0000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_SHIFT       16
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10M_FULL    0x00010000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0__           0x00020000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0___          0x00040000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_100M_FULL   0x00080000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_1G          0x00100000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_2_DOT_5G    0x00200000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_10G         0x00400000
+		#define PORT_HW_CFG_SPEED_CAPABILITY2_D0_20G         0x00800000
+
+
+	/*  In the case where two media types (e.g. copper and fiber) are
+	      present and electrically active at the same time, PHY Selection
+	      will determine which of the two PHYs will be designated as the
+	      Active PHY and used for a connection to the network.  */
+	u32 multi_phy_config;				    /* 0x290 */
+	#define PORT_HW_CFG_PHY_SELECTION_MASK              0x00000007
+		#define PORT_HW_CFG_PHY_SELECTION_SHIFT              0
+		#define PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT   0x00000000
+		#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY          0x00000001
+		#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY         0x00000002
+		#define PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY 0x00000003
+		#define PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY 0x00000004
+
+	/*  When enabled, all second phy nvram parameters will be swapped
+	      with the first phy parameters */
+	#define PORT_HW_CFG_PHY_SWAPPED_MASK                0x00000008
+		#define PORT_HW_CFG_PHY_SWAPPED_SHIFT                3
+		#define PORT_HW_CFG_PHY_SWAPPED_DISABLED             0x00000000
+		#define PORT_HW_CFG_PHY_SWAPPED_ENABLED              0x00000008
+
+
+	/*  Address of the second external phy */
+	u32 external_phy_config2;			    /* 0x294 */
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_MASK         0x000000FF
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_ADDR_SHIFT                 0
+
+	/*  The second XGXS external PHY type */
+	#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_MASK         0x0000FF00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SHIFT         8
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_DIRECT        0x00000000
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8071       0x00000100
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8072       0x00000200
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8073       0x00000300
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8705       0x00000400
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8706       0x00000500
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8726       0x00000600
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8481       0x00000700
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_SFX7101       0x00000800
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727       0x00000900
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8727_NOC   0x00000a00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84823      0x00000b00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54640      0x00000c00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM84833      0x00000d00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM54618SE    0x00000e00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_BCM8722       0x00000f00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_FAILURE       0x0000fd00
+		#define PORT_HW_CFG_XGXS_EXT_PHY2_TYPE_NOT_CONN      0x0000ff00
+
+
+	/*  4 times 16 bits for all 4 lanes. For some external PHYs (such as
+	      8706, 8726 and 8727) not all 4 values are needed. */
+	u16 xgxs_config2_rx[4];				    /* 0x296 */
+	u16 xgxs_config2_tx[4];				    /* 0x2A0 */
+
+	u32 lane_config;
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASK              0x0000ffff
+		#define PORT_HW_CFG_LANE_SWAP_CFG_SHIFT              0
+		/* AN and forced */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_01230123           0x00001b1b
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_01233210           0x00001be4
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_31203120           0x0000d8d8
+		/* forced only */
+		#define PORT_HW_CFG_LANE_SWAP_CFG_32103210           0x0000e4e4
+	#define PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK           0x000000ff
+	#define PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT                   0
+	#define PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK           0x0000ff00
+	#define PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT                   8
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK       0x0000c000
+	#define PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT               14
+
+	/*  Indicate whether to swap the external phy polarity */
+	#define PORT_HW_CFG_SWAP_PHY_POLARITY_MASK          0x00010000
+		#define PORT_HW_CFG_SWAP_PHY_POLARITY_DISABLED       0x00000000
+		#define PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED        0x00010000
+
+
+	u32 external_phy_config;
+	#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK          0x000000ff
+	#define PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT                  0
+
+	#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK          0x0000ff00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SHIFT          8
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT         0x00000000
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8071        0x00000100
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072        0x00000200
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073        0x00000300
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705        0x00000400
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706        0x00000500
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726        0x00000600
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481        0x00000700
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101        0x00000800
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727        0x00000900
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC    0x00000a00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823       0x00000b00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54640       0x00000c00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833       0x00000d00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE     0x00000e00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722        0x00000f00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT_WC      0x0000fc00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE        0x0000fd00
+		#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN       0x0000ff00
+
+	#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK        0x00ff0000
+	#define PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT                16
+
+	#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK        0xff000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_SHIFT        24
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT       0x00000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482      0x01000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD    0x02000000
+		#define PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN     0xff000000
+
+	u32 speed_capability_mask;
+	#define PORT_HW_CFG_SPEED_CAPABILITY_D3_MASK        0x0000ffff
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_SHIFT        0
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_FULL     0x00000001
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10M_HALF     0x00000002
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_HALF    0x00000004
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_100M_FULL    0x00000008
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_1G           0x00000010
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_2_5G         0x00000020
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_10G          0x00000040
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_20G          0x00000080
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D3_RESERVED     0x0000f000
+
+	#define PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK        0xffff0000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_SHIFT        16
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL     0x00010000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF     0x00020000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF    0x00040000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL    0x00080000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_1G           0x00100000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G         0x00200000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_10G          0x00400000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_20G          0x00800000
+		#define PORT_HW_CFG_SPEED_CAPABILITY_D0_RESERVED     0xf0000000
+
+	/*  A place to hold the original MAC address as a backup */
+	u32 backup_mac_upper;			/* 0x2B4 */
+	u32 backup_mac_lower;			/* 0x2B8 */
+
+};
+
+
+/****************************************************************************
+ * Shared Feature configuration                                             *
+ ****************************************************************************/
+struct shared_feat_cfg {		 /* NVRAM Offset */
+
+	u32 config;			/* 0x450 */
+	#define SHARED_FEATURE_BMC_ECHO_MODE_EN             0x00000001
+
+	/* Use NVRAM values instead of HW default values */
+	#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_MASK \
+							    0x00000002
+		#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED \
+								     0x00000000
+		#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED \
+								     0x00000002
+
+	#define SHARED_FEAT_CFG_NCSI_ID_METHOD_MASK         0x00000008
+		#define SHARED_FEAT_CFG_NCSI_ID_METHOD_SPIO          0x00000000
+		#define SHARED_FEAT_CFG_NCSI_ID_METHOD_NVRAM         0x00000008
+
+	#define SHARED_FEAT_CFG_NCSI_ID_MASK                0x00000030
+	#define SHARED_FEAT_CFG_NCSI_ID_SHIFT                        4
+
+	/*  Override the OTP back to single function mode. When using GPIO,
+	      high means SF only, low means according to the CLP
+	      configuration */
+	#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK          0x00000700
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT          8
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED     0x00000000
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF      0x00000100
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4          0x00000200
+		#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT  0x00000300
+
+	/* The interval in seconds between sending LLDP packets. Set to zero
+	   to disable the feature */
+	#define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_MASK     0x00ff0000
+	#define SHARED_FEAT_CFG_LLDP_XMIT_INTERVAL_SHIFT             16
+
+	/* The assigned device type ID for LLDP usage */
+	#define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_MASK    0xff000000
+	#define SHARED_FEAT_CFG_LLDP_DEVICE_TYPE_ID_SHIFT            24
+
+};
+
+
+/****************************************************************************
+ * Port Feature configuration                                               *
+ ****************************************************************************/
+struct port_feat_cfg {		    /* port 0: 0x454  port 1: 0x4c8 */
+
+	u32 config;
+	#define PORT_FEATURE_BAR1_SIZE_MASK                 0x0000000f
+		#define PORT_FEATURE_BAR1_SIZE_SHIFT                 0
+		#define PORT_FEATURE_BAR1_SIZE_DISABLED              0x00000000
+		#define PORT_FEATURE_BAR1_SIZE_64K                   0x00000001
+		#define PORT_FEATURE_BAR1_SIZE_128K                  0x00000002
+		#define PORT_FEATURE_BAR1_SIZE_256K                  0x00000003
+		#define PORT_FEATURE_BAR1_SIZE_512K                  0x00000004
+		#define PORT_FEATURE_BAR1_SIZE_1M                    0x00000005
+		#define PORT_FEATURE_BAR1_SIZE_2M                    0x00000006
+		#define PORT_FEATURE_BAR1_SIZE_4M                    0x00000007
+		#define PORT_FEATURE_BAR1_SIZE_8M                    0x00000008
+		#define PORT_FEATURE_BAR1_SIZE_16M                   0x00000009
+		#define PORT_FEATURE_BAR1_SIZE_32M                   0x0000000a
+		#define PORT_FEATURE_BAR1_SIZE_64M                   0x0000000b
+		#define PORT_FEATURE_BAR1_SIZE_128M                  0x0000000c
+		#define PORT_FEATURE_BAR1_SIZE_256M                  0x0000000d
+		#define PORT_FEATURE_BAR1_SIZE_512M                  0x0000000e
+		#define PORT_FEATURE_BAR1_SIZE_1G                    0x0000000f
+	#define PORT_FEATURE_BAR2_SIZE_MASK                 0x000000f0
+		#define PORT_FEATURE_BAR2_SIZE_SHIFT                 4
+		#define PORT_FEATURE_BAR2_SIZE_DISABLED              0x00000000
+		#define PORT_FEATURE_BAR2_SIZE_64K                   0x00000010
+		#define PORT_FEATURE_BAR2_SIZE_128K                  0x00000020
+		#define PORT_FEATURE_BAR2_SIZE_256K                  0x00000030
+		#define PORT_FEATURE_BAR2_SIZE_512K                  0x00000040
+		#define PORT_FEATURE_BAR2_SIZE_1M                    0x00000050
+		#define PORT_FEATURE_BAR2_SIZE_2M                    0x00000060
+		#define PORT_FEATURE_BAR2_SIZE_4M                    0x00000070
+		#define PORT_FEATURE_BAR2_SIZE_8M                    0x00000080
+		#define PORT_FEATURE_BAR2_SIZE_16M                   0x00000090
+		#define PORT_FEATURE_BAR2_SIZE_32M                   0x000000a0
+		#define PORT_FEATURE_BAR2_SIZE_64M                   0x000000b0
+		#define PORT_FEATURE_BAR2_SIZE_128M                  0x000000c0
+		#define PORT_FEATURE_BAR2_SIZE_256M                  0x000000d0
+		#define PORT_FEATURE_BAR2_SIZE_512M                  0x000000e0
+		#define PORT_FEATURE_BAR2_SIZE_1G                    0x000000f0
+
+	#define PORT_FEAT_CFG_DCBX_MASK                     0x00000100
+		#define PORT_FEAT_CFG_DCBX_DISABLED                  0x00000000
+		#define PORT_FEAT_CFG_DCBX_ENABLED                   0x00000100
+
+	#define PORT_FEAT_CFG_AUTOGREEN_MASK                0x00000200
+	#define PORT_FEAT_CFG_AUTOGREEN_SHIFT                        9
+	#define PORT_FEAT_CFG_AUTOGREEN_DISABLED                     0x00000000
+	#define PORT_FEAT_CFG_AUTOGREEN_ENABLED                      0x00000200
+
+	#define PORT_FEATURE_EN_SIZE_MASK                   0x0f000000
+	#define PORT_FEATURE_EN_SIZE_SHIFT                           24
+	#define PORT_FEATURE_WOL_ENABLED                             0x01000000
+	#define PORT_FEATURE_MBA_ENABLED                             0x02000000
+	#define PORT_FEATURE_MFW_ENABLED                             0x04000000
+
+	/* Advertise expansion ROM even if MBA is disabled */
+	#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_MASK        0x08000000
+		#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_DISABLED     0x00000000
+		#define PORT_FEAT_CFG_FORCE_EXP_ROM_ADV_ENABLED      0x08000000
+
+	/* Check the optic vendor via i2c against a list of approved modules
+	   in a separate nvram image */
+	#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK         0xe0000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_SHIFT         29
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT \
+								     0x00000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER \
+								     0x20000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_WARNING_MSG   0x40000000
+		#define PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN    0x60000000
+
+	u32 wol_config;
+	/* The default is used when the driver is set to "auto" mode */
+	#define PORT_FEATURE_WOL_DEFAULT_MASK               0x00000003
+		#define PORT_FEATURE_WOL_DEFAULT_SHIFT               0
+		#define PORT_FEATURE_WOL_DEFAULT_DISABLE             0x00000000
+		#define PORT_FEATURE_WOL_DEFAULT_MAGIC               0x00000001
+		#define PORT_FEATURE_WOL_DEFAULT_ACPI                0x00000002
+		#define PORT_FEATURE_WOL_DEFAULT_MAGIC_AND_ACPI      0x00000003
+	#define PORT_FEATURE_WOL_RES_PAUSE_CAP              0x00000004
+	#define PORT_FEATURE_WOL_RES_ASYM_PAUSE_CAP         0x00000008
+	#define PORT_FEATURE_WOL_ACPI_UPON_MGMT             0x00000010
+
+	u32 mba_config;
+	#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK       0x00000007
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_SHIFT       0
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE         0x00000000
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_RPL         0x00000001
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_BOOTP       0x00000002
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB      0x00000003
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT   0x00000004
+		#define PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE        0x00000007
+
+	#define PORT_FEATURE_MBA_BOOT_RETRY_MASK            0x00000038
+	#define PORT_FEATURE_MBA_BOOT_RETRY_SHIFT                    3
+
+	#define PORT_FEATURE_MBA_RES_PAUSE_CAP              0x00000100
+	#define PORT_FEATURE_MBA_RES_ASYM_PAUSE_CAP         0x00000200
+	#define PORT_FEATURE_MBA_SETUP_PROMPT_ENABLE        0x00000400
+	#define PORT_FEATURE_MBA_HOTKEY_MASK                0x00000800
+		#define PORT_FEATURE_MBA_HOTKEY_CTRL_S               0x00000000
+		#define PORT_FEATURE_MBA_HOTKEY_CTRL_B               0x00000800
+	#define PORT_FEATURE_MBA_EXP_ROM_SIZE_MASK          0x000ff000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_SHIFT          12
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_DISABLED       0x00000000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2K             0x00001000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4K             0x00002000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8K             0x00003000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16K            0x00004000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32K            0x00005000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_64K            0x00006000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_128K           0x00007000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_256K           0x00008000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_512K           0x00009000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_1M             0x0000a000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_2M             0x0000b000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_4M             0x0000c000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_8M             0x0000d000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_16M            0x0000e000
+		#define PORT_FEATURE_MBA_EXP_ROM_SIZE_32M            0x0000f000
+	#define PORT_FEATURE_MBA_MSG_TIMEOUT_MASK           0x00f00000
+	#define PORT_FEATURE_MBA_MSG_TIMEOUT_SHIFT                   20
+	#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_MASK        0x03000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_SHIFT        24
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_AUTO         0x00000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_BBS          0x01000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT18H       0x02000000
+		#define PORT_FEATURE_MBA_BIOS_BOOTSTRAP_INT19H       0x03000000
+	#define PORT_FEATURE_MBA_LINK_SPEED_MASK            0x3c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_SHIFT            26
+		#define PORT_FEATURE_MBA_LINK_SPEED_AUTO             0x00000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10HD             0x04000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10FD             0x08000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_100HD            0x0c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_100FD            0x10000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_1GBPS            0x14000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_2_5GBPS          0x18000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_10GBPS_CX4       0x1c000000
+		#define PORT_FEATURE_MBA_LINK_SPEED_20GBPS           0x20000000
+	u32 bmc_config;
+	#define PORT_FEATURE_BMC_LINK_OVERRIDE_MASK         0x00000001
+		#define PORT_FEATURE_BMC_LINK_OVERRIDE_DEFAULT       0x00000000
+		#define PORT_FEATURE_BMC_LINK_OVERRIDE_EN            0x00000001
+
+	u32 mba_vlan_cfg;
+	#define PORT_FEATURE_MBA_VLAN_TAG_MASK              0x0000ffff
+	#define PORT_FEATURE_MBA_VLAN_TAG_SHIFT                      0
+	#define PORT_FEATURE_MBA_VLAN_EN                    0x00010000
+
+	u32 resource_cfg;
+	#define PORT_FEATURE_RESOURCE_CFG_VALID             0x00000001
+	#define PORT_FEATURE_RESOURCE_CFG_DIAG              0x00000002
+	#define PORT_FEATURE_RESOURCE_CFG_L2                0x00000004
+	#define PORT_FEATURE_RESOURCE_CFG_ISCSI             0x00000008
+	#define PORT_FEATURE_RESOURCE_CFG_RDMA              0x00000010
+
+	u32 smbus_config;
+	#define PORT_FEATURE_SMBUS_ADDR_MASK                0x000000fe
+	#define PORT_FEATURE_SMBUS_ADDR_SHIFT                        1
+
+	u32 vf_config;
+	#define PORT_FEAT_CFG_VF_BAR2_SIZE_MASK             0x0000000f
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_SHIFT             0
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_DISABLED          0x00000000
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_4K                0x00000001
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_8K                0x00000002
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_16K               0x00000003
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_32K               0x00000004
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_64K               0x00000005
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_128K              0x00000006
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_256K              0x00000007
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_512K              0x00000008
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_1M                0x00000009
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_2M                0x0000000a
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_4M                0x0000000b
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_8M                0x0000000c
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_16M               0x0000000d
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_32M               0x0000000e
+		#define PORT_FEAT_CFG_VF_BAR2_SIZE_64M               0x0000000f
+
+	u32 link_config;    /* Used as HW defaults for the driver */
+	#define PORT_FEATURE_CONNECTED_SWITCH_MASK          0x03000000
+		#define PORT_FEATURE_CONNECTED_SWITCH_SHIFT          24
+		/* (forced) low speed switch (< 10G) */
+		#define PORT_FEATURE_CON_SWITCH_1G_SWITCH            0x00000000
+		/* (forced) high speed switch (>= 10G) */
+		#define PORT_FEATURE_CON_SWITCH_10G_SWITCH           0x01000000
+		#define PORT_FEATURE_CON_SWITCH_AUTO_DETECT          0x02000000
+		#define PORT_FEATURE_CON_SWITCH_ONE_TIME_DETECT      0x03000000
+
+	#define PORT_FEATURE_LINK_SPEED_MASK                0x000f0000
+		#define PORT_FEATURE_LINK_SPEED_SHIFT                16
+		#define PORT_FEATURE_LINK_SPEED_AUTO                 0x00000000
+		#define PORT_FEATURE_LINK_SPEED_10M_FULL             0x00010000
+		#define PORT_FEATURE_LINK_SPEED_10M_HALF             0x00020000
+		#define PORT_FEATURE_LINK_SPEED_100M_HALF            0x00030000
+		#define PORT_FEATURE_LINK_SPEED_100M_FULL            0x00040000
+		#define PORT_FEATURE_LINK_SPEED_1G                   0x00050000
+		#define PORT_FEATURE_LINK_SPEED_2_5G                 0x00060000
+		#define PORT_FEATURE_LINK_SPEED_10G_CX4              0x00070000
+		#define PORT_FEATURE_LINK_SPEED_20G                  0x00080000
+
+	#define PORT_FEATURE_FLOW_CONTROL_MASK              0x00000700
+		#define PORT_FEATURE_FLOW_CONTROL_SHIFT              8
+		#define PORT_FEATURE_FLOW_CONTROL_AUTO               0x00000000
+		#define PORT_FEATURE_FLOW_CONTROL_TX                 0x00000100
+		#define PORT_FEATURE_FLOW_CONTROL_RX                 0x00000200
+		#define PORT_FEATURE_FLOW_CONTROL_BOTH               0x00000300
+		#define PORT_FEATURE_FLOW_CONTROL_NONE               0x00000400
+
+	/* The default for MCP link configuration,
+	   uses the same defines as link_config */
+	u32 mfw_wol_link_cfg;
+
+	/* The default for the driver of the second external phy,
+	   uses the same defines as link_config */
+	u32 link_config2;				    /* 0x47C */
+
+	/* The default for MCP of the second external phy,
+	   uses the same defines as link_config */
+	u32 mfw_wol_link_cfg2;				    /* 0x480 */
+
+	u32 Reserved2[17];				    /* 0x484 */
+
+};
+
+
+/****************************************************************************
+ * Device Information                                                       *
+ ****************************************************************************/
+struct shm_dev_info {				/* size */
+
+	u32    bc_rev; /* 8 bits each: major, minor, build */	       /* 4 */
+
+	struct shared_hw_cfg     shared_hw_config;	      /* 40 */
+
+	struct port_hw_cfg       port_hw_config[PORT_MAX];     /* 400*2=800 */
+
+	struct shared_feat_cfg   shared_feature_config;		   /* 4 */
+
+	struct port_feat_cfg     port_feature_config[PORT_MAX];/* 116*2=232 */
+
+};
+
+
+#if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN)
+	#error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition."
+#endif
+
+#define FUNC_0              0
+#define FUNC_1              1
+#define FUNC_2              2
+#define FUNC_3              3
+#define FUNC_4              4
+#define FUNC_5              5
+#define FUNC_6              6
+#define FUNC_7              7
+#define E1_FUNC_MAX         2
+#define E1H_FUNC_MAX            8
+#define E2_FUNC_MAX         4   /* per path */
+
+#define VN_0                0
+#define VN_1                1
+#define VN_2                2
+#define VN_3                3
+#define E1VN_MAX            1
+#define E1HVN_MAX           4
+
+#define E2_VF_MAX           64  /* HC_REG_VF_CONFIGURATION_SIZE */
+/* This value (in milliseconds) determines the frequency at which the driver
+ * issues the PULSE message code.  The firmware monitors this periodic
+ * pulse to determine when to switch to an OS-absent mode. */
+#define DRV_PULSE_PERIOD_MS     250
+
+/* This value (in milliseconds) determines how long the driver should
+ * wait for an acknowledgement from the firmware before timing out.  Once
+ * the firmware has timed out, the driver will assume there is no firmware
+ * running and there won't be any firmware-driver synchronization during a
+ * driver reset. */
+#define FW_ACK_TIME_OUT_MS      5000
+
+#define FW_ACK_POLL_TIME_MS     1
+
+#define FW_ACK_NUM_OF_POLL  (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
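+/* With the values above, this amounts to 5000 polls of 1 ms each */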
+
+/* LED Blink rate that will achieve ~15.9Hz */
+#define LED_BLINK_RATE_VAL      480
+
+/****************************************************************************
+ * Driver <-> FW Mailbox                                                    *
+ ****************************************************************************/
+struct drv_port_mb {
+
+	u32 link_status;
+	/* Driver should update this field on any link change event */
+
+	#define LINK_STATUS_LINK_FLAG_MASK			0x00000001
+	#define LINK_STATUS_LINK_UP				0x00000001
+	#define LINK_STATUS_SPEED_AND_DUPLEX_MASK		0x0000001E
+	#define LINK_STATUS_SPEED_AND_DUPLEX_AN_NOT_COMPLETE	(0<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10THD		(1<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10TFD		(2<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100TXHD		(3<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100T4		(4<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_100TXFD		(5<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD		(6<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD		(7<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_1000XFD		(7<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500THD		(8<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500TFD		(9<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_2500XFD		(9<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10GTFD		(10<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_10GXFD		(10<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_20GTFD		(11<<1)
+	#define LINK_STATUS_SPEED_AND_DUPLEX_20GXFD		(11<<1)
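+	/*
+	 * Decoding sketch (illustrative): the speed/duplex field is matched
+	 * as a whole, e.g.
+	 *
+	 *	if ((link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
+	 *	    LINK_STATUS_SPEED_AND_DUPLEX_10GTFD)
+	 *		... link is 10G full duplex ...
+	 */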
+
+	#define LINK_STATUS_AUTO_NEGOTIATE_FLAG_MASK		0x00000020
+	#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED		0x00000020
+
+	#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE		0x00000040
+	#define LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK	0x00000080
+	#define LINK_STATUS_PARALLEL_DETECTION_USED		0x00000080
+
+	#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE	0x00000200
+	#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE	0x00000400
+	#define LINK_STATUS_LINK_PARTNER_100T4_CAPABLE		0x00000800
+	#define LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE	0x00001000
+	#define LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE	0x00002000
+	#define LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE		0x00004000
+	#define LINK_STATUS_LINK_PARTNER_10THD_CAPABLE		0x00008000
+
+	#define LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK		0x00010000
+	#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED		0x00010000
+
+	#define LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK		0x00020000
+	#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED		0x00020000
+
+	#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK	0x000C0000
+	#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE	(0<<18)
+	#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE	(1<<18)
+	#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE	(2<<18)
+	#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE		(3<<18)
+
+	#define LINK_STATUS_SERDES_LINK				0x00100000
+
+	#define LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE	0x00200000
+	#define LINK_STATUS_LINK_PARTNER_2500XHD_CAPABLE	0x00400000
+	#define LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE		0x00800000
+	#define LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE		0x10000000
+
+	#define LINK_STATUS_PFC_ENABLED				0x20000000
+
+	#define LINK_STATUS_PHYSICAL_LINK_FLAG			0x40000000
+
+	u32 port_stx;
+
+	u32 stat_nig_timer;
+
+	/* MCP firmware does not use this field */
+	u32 ext_phy_fw_version;
+
+};
+
+
+struct drv_func_mb {
+
+	u32 drv_mb_header;
+	#define DRV_MSG_CODE_MASK                       0xffff0000
+	#define DRV_MSG_CODE_LOAD_REQ                   0x10000000
+	#define DRV_MSG_CODE_LOAD_DONE                  0x11000000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_EN          0x20000000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS         0x20010000
+	#define DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP         0x20020000
+	#define DRV_MSG_CODE_UNLOAD_DONE                0x21000000
+	#define DRV_MSG_CODE_DCC_OK                     0x30000000
+	#define DRV_MSG_CODE_DCC_FAILURE                0x31000000
+	#define DRV_MSG_CODE_DIAG_ENTER_REQ             0x50000000
+	#define DRV_MSG_CODE_DIAG_EXIT_REQ              0x60000000
+	#define DRV_MSG_CODE_VALIDATE_KEY               0x70000000
+	#define DRV_MSG_CODE_GET_CURR_KEY               0x80000000
+	#define DRV_MSG_CODE_GET_UPGRADE_KEY            0x81000000
+	#define DRV_MSG_CODE_GET_MANUF_KEY              0x82000000
+	#define DRV_MSG_CODE_LOAD_L2B_PRAM              0x90000000
+	/*
+	 * The optic module verification command requires bootcode
+	 * v5.0.6 or later; the specific optic module verification command
+	 * requires bootcode v5.2.12 or later
+	 */
+	#define DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL     0xa0000000
+	#define REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL     0x00050006
+	#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL  0xa1000000
+	#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL  0x00050234
+	#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED   0x00070014
+
+	#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG         0xb0000000
+	#define DRV_MSG_CODE_DCBX_PMF_DRV_OK            0xb2000000
+
+	#define DRV_MSG_CODE_VF_DISABLED_DONE           0xc0000000
+
+	#define DRV_MSG_CODE_SET_MF_BW                  0xe0000000
+	#define REQ_BC_VER_4_SET_MF_BW                  0x00060202
+	#define DRV_MSG_CODE_SET_MF_BW_ACK              0xe1000000
+
+	#define DRV_MSG_CODE_LINK_STATUS_CHANGED        0x01000000
+
+	#define BIOS_MSG_CODE_LIC_CHALLENGE             0xff010000
+	#define BIOS_MSG_CODE_LIC_RESPONSE              0xff020000
+	#define BIOS_MSG_CODE_VIRT_MAC_PRIM             0xff030000
+	#define BIOS_MSG_CODE_VIRT_MAC_ISCSI            0xff040000
+
+	#define DRV_MSG_SEQ_NUMBER_MASK                 0x0000ffff
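+	/*
+	 * Posting sketch (illustrative): the driver writes the message code
+	 * OR'ed with a rolling sequence number, e.g.
+	 *
+	 *	drv_mb_header = DRV_MSG_CODE_LOAD_REQ |
+	 *			(seq & DRV_MSG_SEQ_NUMBER_MASK);
+	 *
+	 * and then polls fw_mb_header until the firmware echoes the same
+	 * sequence number back.
+	 */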
+
+	u32 drv_mb_param;
+	#define DRV_MSG_CODE_SET_MF_BW_MIN_MASK         0x00ff0000
+	#define DRV_MSG_CODE_SET_MF_BW_MAX_MASK         0xff000000
+
+	u32 fw_mb_header;
+	#define FW_MSG_CODE_MASK                        0xffff0000
+	#define FW_MSG_CODE_DRV_LOAD_COMMON             0x10100000
+	#define FW_MSG_CODE_DRV_LOAD_PORT               0x10110000
+	#define FW_MSG_CODE_DRV_LOAD_FUNCTION           0x10120000
+	/* Load common chip is supported from bc 6.0.0  */
+	#define REQ_BC_VER_4_DRV_LOAD_COMMON_CHIP       0x00060000
+	#define FW_MSG_CODE_DRV_LOAD_COMMON_CHIP        0x10130000
+
+	#define FW_MSG_CODE_DRV_LOAD_REFUSED            0x10200000
+	#define FW_MSG_CODE_DRV_LOAD_DONE               0x11100000
+	#define FW_MSG_CODE_DRV_UNLOAD_COMMON           0x20100000
+	#define FW_MSG_CODE_DRV_UNLOAD_PORT             0x20110000
+	#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION         0x20120000
+	#define FW_MSG_CODE_DRV_UNLOAD_DONE             0x21100000
+	#define FW_MSG_CODE_DCC_DONE                    0x30100000
+	#define FW_MSG_CODE_LLDP_DONE                   0x40100000
+	#define FW_MSG_CODE_DIAG_ENTER_DONE             0x50100000
+	#define FW_MSG_CODE_DIAG_REFUSE                 0x50200000
+	#define FW_MSG_CODE_DIAG_EXIT_DONE              0x60100000
+	#define FW_MSG_CODE_VALIDATE_KEY_SUCCESS        0x70100000
+	#define FW_MSG_CODE_VALIDATE_KEY_FAILURE        0x70200000
+	#define FW_MSG_CODE_GET_KEY_DONE                0x80100000
+	#define FW_MSG_CODE_NO_KEY                      0x80f00000
+	#define FW_MSG_CODE_LIC_INFO_NOT_READY          0x80f80000
+	#define FW_MSG_CODE_L2B_PRAM_LOADED             0x90100000
+	#define FW_MSG_CODE_L2B_PRAM_T_LOAD_FAILURE     0x90210000
+	#define FW_MSG_CODE_L2B_PRAM_C_LOAD_FAILURE     0x90220000
+	#define FW_MSG_CODE_L2B_PRAM_X_LOAD_FAILURE     0x90230000
+	#define FW_MSG_CODE_L2B_PRAM_U_LOAD_FAILURE     0x90240000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS        0xa0100000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG      0xa0200000
+	#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED     0xa0300000
+	#define FW_MSG_CODE_VF_DISABLED_DONE            0xb0000000
+
+	#define FW_MSG_CODE_SET_MF_BW_SENT              0xe0000000
+	#define FW_MSG_CODE_SET_MF_BW_DONE              0xe1000000
+
+	#define FW_MSG_CODE_LINK_CHANGED_ACK            0x01100000
+
+	#define FW_MSG_CODE_LIC_CHALLENGE               0xff010000
+	#define FW_MSG_CODE_LIC_RESPONSE                0xff020000
+	#define FW_MSG_CODE_VIRT_MAC_PRIM               0xff030000
+	#define FW_MSG_CODE_VIRT_MAC_ISCSI              0xff040000
+
+	#define FW_MSG_SEQ_NUMBER_MASK                  0x0000ffff
+
+	u32 fw_mb_param;
+
+	u32 drv_pulse_mb;
+	#define DRV_PULSE_SEQ_MASK                      0x00007fff
+	#define DRV_PULSE_SYSTEM_TIME_MASK              0xffff0000
+	/*
+	 * The system time is in the format of
+	 * (year-2001)*12*32 + month*32 + day.
+	 */
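+	/*
+	 * Worked example (illustrative): 15-Mar-2011 is encoded as
+	 * (2011-2001)*12*32 + 3*32 + 15 = 3840 + 96 + 15 = 3951.
+	 */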
+	#define DRV_PULSE_ALWAYS_ALIVE                  0x00008000
+	/*
+	 * Indicates to the firmware not to go into OS-absent mode when it
+	 * is not getting a driver pulse. This is used for debugging as
+	 * well as for PXE (MBA).
+	 */
+
+	u32 mcp_pulse_mb;
+	#define MCP_PULSE_SEQ_MASK                      0x00007fff
+	#define MCP_PULSE_ALWAYS_ALIVE                  0x00008000
+	/* Indicates to the driver not to assert due to lack
+	 * of MCP response */
+	#define MCP_EVENT_MASK                          0xffff0000
+	#define MCP_EVENT_OTHER_DRIVER_RESET_REQ        0x00010000
+
+	u32 iscsi_boot_signature;
+	u32 iscsi_boot_block_offset;
+
+	u32 drv_status;
+	#define DRV_STATUS_PMF                          0x00000001
+	#define DRV_STATUS_VF_DISABLED                  0x00000002
+	#define DRV_STATUS_SET_MF_BW                    0x00000004
+	#define DRV_STATUS_LINK_EVENT                   0x00000008
+
+	#define DRV_STATUS_DCC_EVENT_MASK               0x0000ff00
+	#define DRV_STATUS_DCC_DISABLE_ENABLE_PF        0x00000100
+	#define DRV_STATUS_DCC_BANDWIDTH_ALLOCATION     0x00000200
+	#define DRV_STATUS_DCC_CHANGE_MAC_ADDRESS       0x00000400
+	#define DRV_STATUS_DCC_RESERVED1                0x00000800
+	#define DRV_STATUS_DCC_SET_PROTOCOL             0x00001000
+	#define DRV_STATUS_DCC_SET_PRIORITY             0x00002000
+
+	#define DRV_STATUS_DCBX_EVENT_MASK              0x000f0000
+	#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS     0x00010000
+
+	u32 virt_mac_upper;
+	#define VIRT_MAC_SIGN_MASK                      0xffff0000
+	#define VIRT_MAC_SIGNATURE                      0x564d0000
+	u32 virt_mac_lower;
+
+};
+
+
+/****************************************************************************
+ * Management firmware state                                                *
+ ****************************************************************************/
+/* Allocate 440 bytes for management firmware */
+#define MGMTFW_STATE_WORD_SIZE                          110
+
+struct mgmtfw_state {
+	u32 opaque[MGMTFW_STATE_WORD_SIZE];
+};
+
+
+/****************************************************************************
+ * Multi-Function configuration                                             *
+ ****************************************************************************/
+struct shared_mf_cfg {
+
+	u32 clp_mb;
+	#define SHARED_MF_CLP_SET_DEFAULT               0x00000000
+	/* set by CLP */
+	#define SHARED_MF_CLP_EXIT                      0x00000001
+	/* set by MCP */
+	#define SHARED_MF_CLP_EXIT_DONE                 0x00010000
+
+};
+
+struct port_mf_cfg {
+
+	u32 dynamic_cfg;    /* device control channel */
+	#define PORT_MF_CFG_E1HOV_TAG_MASK              0x0000ffff
+	#define PORT_MF_CFG_E1HOV_TAG_SHIFT             0
+	#define PORT_MF_CFG_E1HOV_TAG_DEFAULT         PORT_MF_CFG_E1HOV_TAG_MASK
+
+	u32 reserved[3];
+
+};
+
+struct func_mf_cfg {
+
+	u32 config;
+	/* E/R/I/D */
+	/* function 0 of each port cannot be hidden */
+	#define FUNC_MF_CFG_FUNC_HIDE                   0x00000001
+
+	#define FUNC_MF_CFG_PROTOCOL_MASK               0x00000006
+	#define FUNC_MF_CFG_PROTOCOL_FCOE               0x00000000
+	#define FUNC_MF_CFG_PROTOCOL_ETHERNET           0x00000002
+	#define FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA 0x00000004
+	#define FUNC_MF_CFG_PROTOCOL_ISCSI              0x00000006
+	#define FUNC_MF_CFG_PROTOCOL_DEFAULT \
+				FUNC_MF_CFG_PROTOCOL_ETHERNET_WITH_RDMA
+
+	#define FUNC_MF_CFG_FUNC_DISABLED               0x00000008
+	#define FUNC_MF_CFG_FUNC_DELETED                0x00000010
+
+	/* PRI */
+	/* 0 - low priority, 3 - high priority */
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK      0x00000300
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT     8
+	#define FUNC_MF_CFG_TRANSMIT_PRIORITY_DEFAULT   0x00000000
+
+	/* MINBW, MAXBW */
+	/* value range 0..100, in increments of 100Mbps */
+	#define FUNC_MF_CFG_MIN_BW_MASK                 0x00ff0000
+	#define FUNC_MF_CFG_MIN_BW_SHIFT                16
+	#define FUNC_MF_CFG_MIN_BW_DEFAULT              0x00000000
+	#define FUNC_MF_CFG_MAX_BW_MASK                 0xff000000
+	#define FUNC_MF_CFG_MAX_BW_SHIFT                24
+	#define FUNC_MF_CFG_MAX_BW_DEFAULT              0x64000000
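+	/* e.g. the default MAX_BW is 0x64 << 24, i.e. 100 = full bandwidth */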
+
+	u32 mac_upper;	    /* MAC */
+	#define FUNC_MF_CFG_UPPERMAC_MASK               0x0000ffff
+	#define FUNC_MF_CFG_UPPERMAC_SHIFT              0
+	#define FUNC_MF_CFG_UPPERMAC_DEFAULT           FUNC_MF_CFG_UPPERMAC_MASK
+	u32 mac_lower;
+	#define FUNC_MF_CFG_LOWERMAC_DEFAULT            0xffffffff
+
+	u32 e1hov_tag;	/* VNI */
+	#define FUNC_MF_CFG_E1HOV_TAG_MASK              0x0000ffff
+	#define FUNC_MF_CFG_E1HOV_TAG_SHIFT             0
+	#define FUNC_MF_CFG_E1HOV_TAG_DEFAULT         FUNC_MF_CFG_E1HOV_TAG_MASK
+
+	u32 reserved[2];
+};
+
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+	u32 func_cfg;
+	#define MACP_FUNC_CFG_FLAGS_MASK                0x000000FF
+	#define MACP_FUNC_CFG_FLAGS_SHIFT               0
+	#define MACP_FUNC_CFG_FLAGS_ENABLED             0x00000001
+	#define MACP_FUNC_CFG_FLAGS_ETHERNET            0x00000002
+	#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD       0x00000004
+	#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD        0x00000008
+
+	u32 iscsi_mac_addr_upper;
+	u32 iscsi_mac_addr_lower;
+
+	u32 fcoe_mac_addr_upper;
+	u32 fcoe_mac_addr_lower;
+
+	u32 fcoe_wwn_port_name_upper;
+	u32 fcoe_wwn_port_name_lower;
+
+	u32 fcoe_wwn_node_name_upper;
+	u32 fcoe_wwn_node_name_lower;
+
+	u32 preserve_data;
+	#define MF_FUNC_CFG_PRESERVE_L2_MAC             (1<<0)
+	#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC          (1<<1)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_MAC           (1<<2)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P         (1<<3)
+	#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N         (1<<4)
+	#define MF_FUNC_CFG_PRESERVE_TX_BW              (1<<5)
+};
+
+struct mf_cfg {
+
+	struct shared_mf_cfg    shared_mf_config;       /* 0x4 */
+	struct port_mf_cfg  port_mf_config[PORT_MAX];   /* 0x10 * 2 = 0x20 */
+	/* for all chips, there are 8 mf functions */
+	struct func_mf_cfg  func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
+	/*
+	 * Extended configuration per function  - this array does not exist and
+	 * should not be accessed on 57711
+	 */
+	struct func_ext_cfg func_ext_config[E1H_FUNC_MAX]; /* 0x28 * 8 = 0x140*/
+}; /* 0x224 */
+
+/****************************************************************************
+ * Shared Memory Region                                                     *
+ ****************************************************************************/
+struct shmem_region {		       /*   SharedMem Offset (size) */
+
+	u32         validity_map[PORT_MAX];  /* 0x0 (4*2 = 0x8) */
+	#define SHR_MEM_FORMAT_REV_MASK                     0xff000000
+	#define SHR_MEM_FORMAT_REV_ID                       ('A'<<24)
+	/* validity bits */
+	#define SHR_MEM_VALIDITY_PCI_CFG                    0x00100000
+	#define SHR_MEM_VALIDITY_MB                         0x00200000
+	#define SHR_MEM_VALIDITY_DEV_INFO                   0x00400000
+	#define SHR_MEM_VALIDITY_RESERVED                   0x00000007
+	/* One licensing bit should be set */
+	#define SHR_MEM_VALIDITY_LIC_KEY_IN_EFFECT_MASK     0x00000038
+	#define SHR_MEM_VALIDITY_LIC_MANUF_KEY_IN_EFFECT    0x00000008
+	#define SHR_MEM_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT  0x00000010
+	#define SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT       0x00000020
+	/* Active MFW */
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UNKNOWN         0x00000000
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_MASK            0x000001c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_IPMI            0x00000040
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_UMP             0x00000080
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NCSI            0x000000c0
+	#define SHR_MEM_VALIDITY_ACTIVE_MFW_NONE            0x000001c0
+
+	struct shm_dev_info dev_info;	     /* 0x8     (0x438) */
+
+	struct license_key       drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
+
+	/* FW information (for internal FW use) */
+	u32         fw_info_fio_offset;		/* 0x4a8       (0x4) */
+	struct mgmtfw_state mgmtfw_state;	/* 0x4ac     (0x1b8) */
+
+	struct drv_port_mb  port_mb[PORT_MAX];	/* 0x664 (16*2=0x20) */
+
+#ifdef BMAPI
+	/* This is a variable length array */
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb func_mb[1];	/* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#else
+	/* the number of functions depends on the chip type */
+	struct drv_func_mb  func_mb[];	/* 0x684 (44*2/4/8=0x58/0xb0/0x160) */
+#endif /* BMAPI */
+
+}; /* 57710 = 0x6dc | 57711 = 0x7E4 | 57712 = 0x734 */
+
+/****************************************************************************
+ * Shared Memory 2 Region                                                   *
+ ****************************************************************************/
+/* The fw_flr_ack is actually built in the following way:                   */
+/* 8 bit:  PF ack                                                           */
+/* 64 bit: VF ack                                                           */
+/* 8 bit:  iov_dis_ack                                                      */
+/* In order to maintain endianness in the mailbox HSI, we want to keep      */
+/* using u32. The FW must have the VF right after the PF since this is how  */
+/* it accesses arrays (it always expects the VF to reside right after the   */
+/* PF, which makes the calculation much easier for it).                     */
+/* In order to satisfy both limitations, and keep the struct small, the     */
+/* code will abuse the structure defined here to achieve the actual         */
+/* partition above.                                                         */
+/****************************************************************************/
+struct fw_flr_ack {
+	u32         pf_ack;
+	u32         vf_ack[1];
+	u32         iov_dis_ack;
+};
+
+struct fw_flr_mb {
+	u32         aggint;
+	u32         opgen_addr;
+	struct fw_flr_ack ack;
+};
+
+/**** SUPPORT FOR SHMEM ARRAYS ***
+ * The SHMEM HSI is aligned on 32 bit boundaries which makes it difficult to
+ * define arrays with storage types smaller than unsigned dwords.
+ * The macros below add generic support for SHMEM arrays with numeric elements
+ * that can span 2, 4, 8 or 16 bits. The underlying array type is a 32 bit
+ * dword array with individual bit-field elements accessed using shifts and
+ * masks.
+ *
+ */
+
+/* eb is the bitwidth of a single element */
+#define SHMEM_ARRAY_MASK(eb)		((1<<(eb))-1)
+#define SHMEM_ARRAY_ENTRY(i, eb)	((i)/(32/(eb)))
+
+/* the bit-position macro allows the user to flip the order of the array's
+ * elements on a per byte or word boundary.
+ *
+ * example: an array with 8 entries each 4 bits wide. This array will fit into
+ * a single dword. The diagrams below show the array order of the nibbles.
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
+ *
+ *                |                |                |               |
+ *   0    |   1   |   2    |   3   |   4    |   5   |   6   |   7   |
+ *                |                |                |               |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
+ *
+ *                |                |                |               |
+ *   1   |   0    |   3    |   2   |   5    |   4   |   7   |   6   |
+ *                |                |                |               |
+ *
+ * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
+ *
+ *                |                |                |               |
+ *   3   |   2    |   1   |   0    |   7   |   6    |   5   |   4   |
+ *                |                |                |               |
+ */
+#define SHMEM_ARRAY_BITPOS(i, eb, fb)	\
+	((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
+	(((i)%((fb)/(eb))) * (eb)))
+
+#define SHMEM_ARRAY_GET(a, i, eb, fb)					\
+	((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) &  \
+	SHMEM_ARRAY_MASK(eb))
+
+#define SHMEM_ARRAY_SET(a, i, eb, fb, val)				\
+do {									   \
+	a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) <<	   \
+	SHMEM_ARRAY_BITPOS(i, eb, fb));					   \
+	a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) <<  \
+	SHMEM_ARRAY_BITPOS(i, eb, fb));					   \
+} while (0)
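+
+/*
+ * Worked example (editorial illustration, not part of the HSI): for an array
+ * of 4-bit elements flipped per byte (eb = 4, fb = 8),
+ *
+ *	SHMEM_ARRAY_BITPOS(0, 4, 8) = ((4 - 1 - 0) * 8) + (0 * 4) = 24
+ *	SHMEM_ARRAY_BITPOS(1, 4, 8) = ((4 - 1 - 0) * 8) + (1 * 4) = 28
+ *
+ * so elements 0 and 1 share the uppermost byte of dword 0, with element 1 in
+ * the high nibble, matching the per-byte flip diagram above.
+ */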
+
+
+/****START OF DCBX STRUCTURES DECLARATIONS****/
+#define DCBX_MAX_NUM_PRI_PG_ENTRIES	8
+#define DCBX_PRI_PG_BITWIDTH		4
+#define DCBX_PRI_PG_FBITS		8
+#define DCBX_PRI_PG_GET(a, i)		\
+	SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
+#define DCBX_PRI_PG_SET(a, i, val)	\
+	SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
+#define DCBX_MAX_NUM_PG_BW_ENTRIES	8
+#define DCBX_BW_PG_BITWIDTH		8
+#define DCBX_PG_BW_GET(a, i)		\
+	SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
+#define DCBX_PG_BW_SET(a, i, val)	\
+	SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
+#define DCBX_STRICT_PRI_PG		15
+#define DCBX_MAX_APP_PROTOCOL		16
+#define FCOE_APP_IDX			0
+#define ISCSI_APP_IDX			1
+#define PREDEFINED_APP_IDX_MAX		2
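+
+/*
+ * Usage sketch (editorial illustration, not part of the HSI): mapping
+ * priority 3 to PG 5 in a priority-to-PG table packed with the macros above:
+ *
+ *	u32 pri_pg_tbl[1] = { 0 };
+ *
+ *	DCBX_PRI_PG_SET(pri_pg_tbl, 3, 5);
+ *
+ * after which DCBX_PRI_PG_GET(pri_pg_tbl, 3) reads back 5 from the packed
+ * dword.
+ */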
+
+
+/* Big/Little endian have the same representation. */
+struct dcbx_ets_feature {
+	/*
+	 * For Admin MIB - is this feature supported by the
+	 * driver | For Local MIB - should this feature be enabled.
+	 */
+	u32 enabled;
+	u32  pg_bw_tbl[2];
+	u32  pri_pg_tbl[1];
+};
+
+/* Driver structure in LE */
+struct dcbx_pfc_feature {
+#ifdef __BIG_ENDIAN
+	u8 pri_en_bitmap;
+	#define DCBX_PFC_PRI_0 0x01
+	#define DCBX_PFC_PRI_1 0x02
+	#define DCBX_PFC_PRI_2 0x04
+	#define DCBX_PFC_PRI_3 0x08
+	#define DCBX_PFC_PRI_4 0x10
+	#define DCBX_PFC_PRI_5 0x20
+	#define DCBX_PFC_PRI_6 0x40
+	#define DCBX_PFC_PRI_7 0x80
+	u8 pfc_caps;
+	u8 reserved;
+	u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+	u8 enabled;
+	u8 reserved;
+	u8 pfc_caps;
+	u8 pri_en_bitmap;
+	#define DCBX_PFC_PRI_0 0x01
+	#define DCBX_PFC_PRI_1 0x02
+	#define DCBX_PFC_PRI_2 0x04
+	#define DCBX_PFC_PRI_3 0x08
+	#define DCBX_PFC_PRI_4 0x10
+	#define DCBX_PFC_PRI_5 0x20
+	#define DCBX_PFC_PRI_6 0x40
+	#define DCBX_PFC_PRI_7 0x80
+#endif
+};
+
+struct dcbx_app_priority_entry {
+#ifdef __BIG_ENDIAN
+	u16  app_id;
+	u8  pri_bitmap;
+	u8  appBitfield;
+	#define DCBX_APP_ENTRY_VALID         0x01
+	#define DCBX_APP_ENTRY_SF_MASK       0x30
+	#define DCBX_APP_ENTRY_SF_SHIFT      4
+	#define DCBX_APP_SF_ETH_TYPE         0x10
+	#define DCBX_APP_SF_PORT             0x20
+#elif defined(__LITTLE_ENDIAN)
+	u8 appBitfield;
+	#define DCBX_APP_ENTRY_VALID         0x01
+	#define DCBX_APP_ENTRY_SF_MASK       0x30
+	#define DCBX_APP_ENTRY_SF_SHIFT      4
+	#define DCBX_APP_SF_ETH_TYPE         0x10
+	#define DCBX_APP_SF_PORT             0x20
+	u8  pri_bitmap;
+	u16  app_id;
+#endif
+};
+
+
+/* FW structure in BE */
+struct dcbx_app_priority_feature {
+#ifdef __BIG_ENDIAN
+	u8 reserved;
+	u8 default_pri;
+	u8 tc_supported;
+	u8 enabled;
+#elif defined(__LITTLE_ENDIAN)
+	u8 enabled;
+	u8 tc_supported;
+	u8 default_pri;
+	u8 reserved;
+#endif
+	struct dcbx_app_priority_entry  app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+/* FW structure in BE */
+struct dcbx_features {
+	/* PG feature */
+	struct dcbx_ets_feature ets;
+	/* PFC feature */
+	struct dcbx_pfc_feature pfc;
+	/* APP feature */
+	struct dcbx_app_priority_feature app;
+};
+
+/* LLDP protocol parameters */
+/* FW structure in BE */
+struct lldp_params {
+#ifdef __BIG_ENDIAN
+	u8  msg_fast_tx_interval;
+	u8  msg_tx_hold;
+	u8  msg_tx_interval;
+	u8  admin_status;
+	#define LLDP_TX_ONLY  0x01
+	#define LLDP_RX_ONLY  0x02
+	#define LLDP_TX_RX    0x03
+	#define LLDP_DISABLED 0x04
+	u8  reserved1;
+	u8  tx_fast;
+	u8  tx_crd_max;
+	u8  tx_crd;
+#elif defined(__LITTLE_ENDIAN)
+	u8  admin_status;
+	#define LLDP_TX_ONLY  0x01
+	#define LLDP_RX_ONLY  0x02
+	#define LLDP_TX_RX    0x03
+	#define LLDP_DISABLED 0x04
+	u8  msg_tx_interval;
+	u8  msg_tx_hold;
+	u8  msg_fast_tx_interval;
+	u8  tx_crd;
+	u8  tx_crd_max;
+	u8  tx_fast;
+	u8  reserved1;
+#endif
+	#define REM_CHASSIS_ID_STAT_LEN 4
+	#define REM_PORT_ID_STAT_LEN 4
+	/* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
+	u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
+	/* Holds remote Port ID TLV header, subtype and 9B of payload. */
+	u32 peer_port_id[REM_PORT_ID_STAT_LEN];
+};
+
+struct lldp_dcbx_stat {
+	#define LOCAL_CHASSIS_ID_STAT_LEN 2
+	#define LOCAL_PORT_ID_STAT_LEN 2
+	/* Holds local Chassis ID 8B payload of constant subtype 4. */
+	u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
+	/* Holds local Port ID 8B payload of constant subtype 3. */
+	u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
+	/* Number of DCBX frames transmitted. */
+	u32 num_tx_dcbx_pkts;
+	/* Number of DCBX frames received. */
+	u32 num_rx_dcbx_pkts;
+};
+
+/* ADMIN MIB - DCBX local machine default configuration. */
+struct lldp_admin_mib {
+	u32     ver_cfg_flags;
+	#define DCBX_ETS_CONFIG_TX_ENABLED       0x00000001
+	#define DCBX_PFC_CONFIG_TX_ENABLED       0x00000002
+	#define DCBX_APP_CONFIG_TX_ENABLED       0x00000004
+	#define DCBX_ETS_RECO_TX_ENABLED         0x00000008
+	#define DCBX_ETS_RECO_VALID              0x00000010
+	#define DCBX_ETS_WILLING                 0x00000020
+	#define DCBX_PFC_WILLING                 0x00000040
+	#define DCBX_APP_WILLING                 0x00000080
+	#define DCBX_VERSION_CEE                 0x00000100
+	#define DCBX_VERSION_IEEE                0x00000200
+	#define DCBX_DCBX_ENABLED                0x00000400
+	#define DCBX_CEE_VERSION_MASK            0x0000f000
+	#define DCBX_CEE_VERSION_SHIFT           12
+	#define DCBX_CEE_MAX_VERSION_MASK        0x000f0000
+	#define DCBX_CEE_MAX_VERSION_SHIFT       16
+	struct dcbx_features     features;
+};
+
+/* REMOTE MIB - remote machine DCBX configuration. */
+struct lldp_remote_mib {
+	u32 prefix_seq_num;
+	u32 flags;
+	#define DCBX_ETS_TLV_RX                  0x00000001
+	#define DCBX_PFC_TLV_RX                  0x00000002
+	#define DCBX_APP_TLV_RX                  0x00000004
+	#define DCBX_ETS_RX_ERROR                0x00000010
+	#define DCBX_PFC_RX_ERROR                0x00000020
+	#define DCBX_APP_RX_ERROR                0x00000040
+	#define DCBX_ETS_REM_WILLING             0x00000100
+	#define DCBX_PFC_REM_WILLING             0x00000200
+	#define DCBX_APP_REM_WILLING             0x00000400
+	#define DCBX_REMOTE_ETS_RECO_VALID       0x00001000
+	#define DCBX_REMOTE_MIB_VALID            0x00002000
+	struct dcbx_features features;
+	u32 suffix_seq_num;
+};
+
+/* LOCAL MIB - operational DCBX configuration - transmitted on Tx LLDPDU. */
+struct lldp_local_mib {
+	u32 prefix_seq_num;
+	/* Indicates if there is mismatch with negotiation results. */
+	u32 error;
+	#define DCBX_LOCAL_ETS_ERROR             0x00000001
+	#define DCBX_LOCAL_PFC_ERROR             0x00000002
+	#define DCBX_LOCAL_APP_ERROR             0x00000004
+	#define DCBX_LOCAL_PFC_MISMATCH          0x00000010
+	#define DCBX_LOCAL_APP_MISMATCH          0x00000020
+	#define DCBX_REMOTE_MIB_ERROR		 0x00000040
+	struct dcbx_features   features;
+	u32 suffix_seq_num;
+};
+/***END OF DCBX STRUCTURES DECLARATIONS***/
+
+struct ncsi_oem_fcoe_features {
+	u32 fcoe_features1;
+	#define FCOE_FEATURES1_IOS_PER_CONNECTION_MASK          0x0000FFFF
+	#define FCOE_FEATURES1_IOS_PER_CONNECTION_OFFSET        0
+
+	#define FCOE_FEATURES1_LOGINS_PER_PORT_MASK             0xFFFF0000
+	#define FCOE_FEATURES1_LOGINS_PER_PORT_OFFSET           16
+
+	u32 fcoe_features2;
+	#define FCOE_FEATURES2_EXCHANGES_MASK                   0x0000FFFF
+	#define FCOE_FEATURES2_EXCHANGES_OFFSET                 0
+
+	#define FCOE_FEATURES2_NPIV_WWN_PER_PORT_MASK           0xFFFF0000
+	#define FCOE_FEATURES2_NPIV_WWN_PER_PORT_OFFSET         16
+
+	u32 fcoe_features3;
+	#define FCOE_FEATURES3_TARGETS_SUPPORTED_MASK           0x0000FFFF
+	#define FCOE_FEATURES3_TARGETS_SUPPORTED_OFFSET         0
+
+	#define FCOE_FEATURES3_OUTSTANDING_COMMANDS_MASK        0xFFFF0000
+	#define FCOE_FEATURES3_OUTSTANDING_COMMANDS_OFFSET      16
+
+	u32 fcoe_features4;
+	#define FCOE_FEATURES4_FEATURE_SETTINGS_MASK            0x0000000F
+	#define FCOE_FEATURES4_FEATURE_SETTINGS_OFFSET          0
+};
+
+struct ncsi_oem_data {
+	u32 driver_version[4];
+	struct ncsi_oem_fcoe_features ncsi_oem_fcoe_features;
+};
+
+struct shmem2_region {
+
+	u32 size;					/* 0x0000 */
+
+	u32 dcc_support;				/* 0x0004 */
+	#define SHMEM_DCC_SUPPORT_NONE                      0x00000000
+	#define SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV     0x00000001
+	#define SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV  0x00000004
+	#define SHMEM_DCC_SUPPORT_CHANGE_MAC_ADDRESS_TLV    0x00000008
+	#define SHMEM_DCC_SUPPORT_SET_PROTOCOL_TLV          0x00000040
+	#define SHMEM_DCC_SUPPORT_SET_PRIORITY_TLV          0x00000080
+
+	u32 ext_phy_fw_version2[PORT_MAX];		/* 0x0008 */
+	/*
+	 * For backwards compatibility, if the mf_cfg_addr does not exist
+	 * (the size field is smaller than 0xc), the mf_cfg resides at the
+	 * end of struct shmem_region
+	 */
+	u32 mf_cfg_addr;				/* 0x0010 */
+	#define SHMEM_MF_CFG_ADDR_NONE                  0x00000000
+
+	struct fw_flr_mb flr_mb;			/* 0x0014 */
+	u32 dcbx_lldp_params_offset;			/* 0x0028 */
+	#define SHMEM_LLDP_DCBX_PARAMS_NONE             0x00000000
+	u32 dcbx_neg_res_offset;			/* 0x002c */
+	#define SHMEM_DCBX_NEG_RES_NONE			0x00000000
+	u32 dcbx_remote_mib_offset;			/* 0x0030 */
+	#define SHMEM_DCBX_REMOTE_MIB_NONE              0x00000000
+	/*
+	 * The other_shmemX_base_addr fields hold the other path's shmem
+	 * addresses, required for example during common phy init, or so that
+	 * path1 can find the mcp debug trace, which is located at an offset
+	 * from path0's shmem
+	 */
+	u32 other_shmem_base_addr;			/* 0x0034 */
+	u32 other_shmem2_base_addr;			/* 0x0038 */
+	/*
+	 * mcp_vf_disabled is set by the MCP to inform the driver about VFs
+	 * that were disabled/FLRed
+	 */
+	u32 mcp_vf_disabled[E2_VF_MAX / 32];		/* 0x003c */
+
+	/*
+	 * drv_ack_vf_disabled is set by the PF driver to acknowledge that the
+	 * disabled VFs were handled
+	 */
+	u32 drv_ack_vf_disabled[E2_FUNC_MAX][E2_VF_MAX / 32]; /* 0x0044 */
+
+	u32 dcbx_lldp_dcbx_stat_offset;			/* 0x0064 */
+	#define SHMEM_LLDP_DCBX_STAT_NONE               0x00000000
+
+	/*
+	 * The edebug_driver_if field is used to transfer messages between the
+	 * edebug app and the driver through shmem2.
+	 *
+	 * message format:
+	 * bits 0-2 -  function number / instance of driver to perform request
+	 * bits 3-5 -  op code / is_ack?
+	 * bits 6-63 - data
+	 */
+	u32 edebug_driver_if[2];			/* 0x0068 */
+	#define EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR  1
+	#define EDEBUG_DRIVER_IF_OP_CODE_GET_BUS_ADDR   2
+	#define EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT   3
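+	/*
+	 * Example (editorial illustration, not part of the HSI): a
+	 * GET_PHYS_ADDR request aimed at function 2 would set the low dword
+	 * to (EDEBUG_DRIVER_IF_OP_CODE_GET_PHYS_ADDR << 3) | 2, i.e. 0xa.
+	 */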
+
+	u32 nvm_retain_bitmap_addr;			/* 0x0070 */
+
+	u32	reserved1;	/* 0x0074 */
+
+	u32	reserved2[E2_FUNC_MAX];
+
+	u32	reserved3[E2_FUNC_MAX];/* 0x0088 */
+	u32	reserved4[E2_FUNC_MAX];/* 0x0098 */
+
+	u32 swim_base_addr;				/* 0x0108 */
+	u32 swim_funcs;
+	u32 swim_main_cb;
+
+	u32	reserved5[2];
+
+	/* generic flags controlled by the driver */
+	u32 drv_flags;
+	#define DRV_FLAGS_DCB_CONFIGURED                0x1
+
+	/* pointer to extended dev_info shared data copied from nvm image */
+	u32 extended_dev_info_shared_addr;
+	u32 ncsi_oem_data_addr;
+
+	u32 ocsd_host_addr;
+	u32 ocbb_host_addr;
+	u32 ocsd_req_update_interval;
+};
+
+
+struct emac_stats {
+	u32     rx_stat_ifhcinoctets;
+	u32     rx_stat_ifhcinbadoctets;
+	u32     rx_stat_etherstatsfragments;
+	u32     rx_stat_ifhcinucastpkts;
+	u32     rx_stat_ifhcinmulticastpkts;
+	u32     rx_stat_ifhcinbroadcastpkts;
+	u32     rx_stat_dot3statsfcserrors;
+	u32     rx_stat_dot3statsalignmenterrors;
+	u32     rx_stat_dot3statscarriersenseerrors;
+	u32     rx_stat_xonpauseframesreceived;
+	u32     rx_stat_xoffpauseframesreceived;
+	u32     rx_stat_maccontrolframesreceived;
+	u32     rx_stat_xoffstateentered;
+	u32     rx_stat_dot3statsframestoolong;
+	u32     rx_stat_etherstatsjabbers;
+	u32     rx_stat_etherstatsundersizepkts;
+	u32     rx_stat_etherstatspkts64octets;
+	u32     rx_stat_etherstatspkts65octetsto127octets;
+	u32     rx_stat_etherstatspkts128octetsto255octets;
+	u32     rx_stat_etherstatspkts256octetsto511octets;
+	u32     rx_stat_etherstatspkts512octetsto1023octets;
+	u32     rx_stat_etherstatspkts1024octetsto1522octets;
+	u32     rx_stat_etherstatspktsover1522octets;
+
+	u32     rx_stat_falsecarriererrors;
+
+	u32     tx_stat_ifhcoutoctets;
+	u32     tx_stat_ifhcoutbadoctets;
+	u32     tx_stat_etherstatscollisions;
+	u32     tx_stat_outxonsent;
+	u32     tx_stat_outxoffsent;
+	u32     tx_stat_flowcontroldone;
+	u32     tx_stat_dot3statssinglecollisionframes;
+	u32     tx_stat_dot3statsmultiplecollisionframes;
+	u32     tx_stat_dot3statsdeferredtransmissions;
+	u32     tx_stat_dot3statsexcessivecollisions;
+	u32     tx_stat_dot3statslatecollisions;
+	u32     tx_stat_ifhcoutucastpkts;
+	u32     tx_stat_ifhcoutmulticastpkts;
+	u32     tx_stat_ifhcoutbroadcastpkts;
+	u32     tx_stat_etherstatspkts64octets;
+	u32     tx_stat_etherstatspkts65octetsto127octets;
+	u32     tx_stat_etherstatspkts128octetsto255octets;
+	u32     tx_stat_etherstatspkts256octetsto511octets;
+	u32     tx_stat_etherstatspkts512octetsto1023octets;
+	u32     tx_stat_etherstatspkts1024octetsto1522octets;
+	u32     tx_stat_etherstatspktsover1522octets;
+	u32     tx_stat_dot3statsinternalmactransmiterrors;
+};
+
+
+struct bmac1_stats {
+	u32	tx_stat_gtpkt_lo;
+	u32	tx_stat_gtpkt_hi;
+	u32	tx_stat_gtxpf_lo;
+	u32	tx_stat_gtxpf_hi;
+	u32	tx_stat_gtfcs_lo;
+	u32	tx_stat_gtfcs_hi;
+	u32	tx_stat_gtmca_lo;
+	u32	tx_stat_gtmca_hi;
+	u32	tx_stat_gtbca_lo;
+	u32	tx_stat_gtbca_hi;
+	u32	tx_stat_gtfrg_lo;
+	u32	tx_stat_gtfrg_hi;
+	u32	tx_stat_gtovr_lo;
+	u32	tx_stat_gtovr_hi;
+	u32	tx_stat_gt64_lo;
+	u32	tx_stat_gt64_hi;
+	u32	tx_stat_gt127_lo;
+	u32	tx_stat_gt127_hi;
+	u32	tx_stat_gt255_lo;
+	u32	tx_stat_gt255_hi;
+	u32	tx_stat_gt511_lo;
+	u32	tx_stat_gt511_hi;
+	u32	tx_stat_gt1023_lo;
+	u32	tx_stat_gt1023_hi;
+	u32	tx_stat_gt1518_lo;
+	u32	tx_stat_gt1518_hi;
+	u32	tx_stat_gt2047_lo;
+	u32	tx_stat_gt2047_hi;
+	u32	tx_stat_gt4095_lo;
+	u32	tx_stat_gt4095_hi;
+	u32	tx_stat_gt9216_lo;
+	u32	tx_stat_gt9216_hi;
+	u32	tx_stat_gt16383_lo;
+	u32	tx_stat_gt16383_hi;
+	u32	tx_stat_gtmax_lo;
+	u32	tx_stat_gtmax_hi;
+	u32	tx_stat_gtufl_lo;
+	u32	tx_stat_gtufl_hi;
+	u32	tx_stat_gterr_lo;
+	u32	tx_stat_gterr_hi;
+	u32	tx_stat_gtbyt_lo;
+	u32	tx_stat_gtbyt_hi;
+
+	u32	rx_stat_gr64_lo;
+	u32	rx_stat_gr64_hi;
+	u32	rx_stat_gr127_lo;
+	u32	rx_stat_gr127_hi;
+	u32	rx_stat_gr255_lo;
+	u32	rx_stat_gr255_hi;
+	u32	rx_stat_gr511_lo;
+	u32	rx_stat_gr511_hi;
+	u32	rx_stat_gr1023_lo;
+	u32	rx_stat_gr1023_hi;
+	u32	rx_stat_gr1518_lo;
+	u32	rx_stat_gr1518_hi;
+	u32	rx_stat_gr2047_lo;
+	u32	rx_stat_gr2047_hi;
+	u32	rx_stat_gr4095_lo;
+	u32	rx_stat_gr4095_hi;
+	u32	rx_stat_gr9216_lo;
+	u32	rx_stat_gr9216_hi;
+	u32	rx_stat_gr16383_lo;
+	u32	rx_stat_gr16383_hi;
+	u32	rx_stat_grmax_lo;
+	u32	rx_stat_grmax_hi;
+	u32	rx_stat_grpkt_lo;
+	u32	rx_stat_grpkt_hi;
+	u32	rx_stat_grfcs_lo;
+	u32	rx_stat_grfcs_hi;
+	u32	rx_stat_grmca_lo;
+	u32	rx_stat_grmca_hi;
+	u32	rx_stat_grbca_lo;
+	u32	rx_stat_grbca_hi;
+	u32	rx_stat_grxcf_lo;
+	u32	rx_stat_grxcf_hi;
+	u32	rx_stat_grxpf_lo;
+	u32	rx_stat_grxpf_hi;
+	u32	rx_stat_grxuo_lo;
+	u32	rx_stat_grxuo_hi;
+	u32	rx_stat_grjbr_lo;
+	u32	rx_stat_grjbr_hi;
+	u32	rx_stat_grovr_lo;
+	u32	rx_stat_grovr_hi;
+	u32	rx_stat_grflr_lo;
+	u32	rx_stat_grflr_hi;
+	u32	rx_stat_grmeg_lo;
+	u32	rx_stat_grmeg_hi;
+	u32	rx_stat_grmeb_lo;
+	u32	rx_stat_grmeb_hi;
+	u32	rx_stat_grbyt_lo;
+	u32	rx_stat_grbyt_hi;
+	u32	rx_stat_grund_lo;
+	u32	rx_stat_grund_hi;
+	u32	rx_stat_grfrg_lo;
+	u32	rx_stat_grfrg_hi;
+	u32	rx_stat_grerb_lo;
+	u32	rx_stat_grerb_hi;
+	u32	rx_stat_grfre_lo;
+	u32	rx_stat_grfre_hi;
+	u32	rx_stat_gripj_lo;
+	u32	rx_stat_gripj_hi;
+};
+
+struct bmac2_stats {
+	u32	tx_stat_gtpk_lo; /* gtpok */
+	u32	tx_stat_gtpk_hi; /* gtpok */
+	u32	tx_stat_gtxpf_lo; /* gtpf */
+	u32	tx_stat_gtxpf_hi; /* gtpf */
+	u32	tx_stat_gtpp_lo; /* NEW BMAC2 */
+	u32	tx_stat_gtpp_hi; /* NEW BMAC2 */
+	u32	tx_stat_gtfcs_lo;
+	u32	tx_stat_gtfcs_hi;
+	u32	tx_stat_gtuca_lo; /* NEW BMAC2 */
+	u32	tx_stat_gtuca_hi; /* NEW BMAC2 */
+	u32	tx_stat_gtmca_lo;
+	u32	tx_stat_gtmca_hi;
+	u32	tx_stat_gtbca_lo;
+	u32	tx_stat_gtbca_hi;
+	u32	tx_stat_gtovr_lo;
+	u32	tx_stat_gtovr_hi;
+	u32	tx_stat_gtfrg_lo;
+	u32	tx_stat_gtfrg_hi;
+	u32	tx_stat_gtpkt1_lo; /* gtpkt */
+	u32	tx_stat_gtpkt1_hi; /* gtpkt */
+	u32	tx_stat_gt64_lo;
+	u32	tx_stat_gt64_hi;
+	u32	tx_stat_gt127_lo;
+	u32	tx_stat_gt127_hi;
+	u32	tx_stat_gt255_lo;
+	u32	tx_stat_gt255_hi;
+	u32	tx_stat_gt511_lo;
+	u32	tx_stat_gt511_hi;
+	u32	tx_stat_gt1023_lo;
+	u32	tx_stat_gt1023_hi;
+	u32	tx_stat_gt1518_lo;
+	u32	tx_stat_gt1518_hi;
+	u32	tx_stat_gt2047_lo;
+	u32	tx_stat_gt2047_hi;
+	u32	tx_stat_gt4095_lo;
+	u32	tx_stat_gt4095_hi;
+	u32	tx_stat_gt9216_lo;
+	u32	tx_stat_gt9216_hi;
+	u32	tx_stat_gt16383_lo;
+	u32	tx_stat_gt16383_hi;
+	u32	tx_stat_gtmax_lo;
+	u32	tx_stat_gtmax_hi;
+	u32	tx_stat_gtufl_lo;
+	u32	tx_stat_gtufl_hi;
+	u32	tx_stat_gterr_lo;
+	u32	tx_stat_gterr_hi;
+	u32	tx_stat_gtbyt_lo;
+	u32	tx_stat_gtbyt_hi;
+
+	u32	rx_stat_gr64_lo;
+	u32	rx_stat_gr64_hi;
+	u32	rx_stat_gr127_lo;
+	u32	rx_stat_gr127_hi;
+	u32	rx_stat_gr255_lo;
+	u32	rx_stat_gr255_hi;
+	u32	rx_stat_gr511_lo;
+	u32	rx_stat_gr511_hi;
+	u32	rx_stat_gr1023_lo;
+	u32	rx_stat_gr1023_hi;
+	u32	rx_stat_gr1518_lo;
+	u32	rx_stat_gr1518_hi;
+	u32	rx_stat_gr2047_lo;
+	u32	rx_stat_gr2047_hi;
+	u32	rx_stat_gr4095_lo;
+	u32	rx_stat_gr4095_hi;
+	u32	rx_stat_gr9216_lo;
+	u32	rx_stat_gr9216_hi;
+	u32	rx_stat_gr16383_lo;
+	u32	rx_stat_gr16383_hi;
+	u32	rx_stat_grmax_lo;
+	u32	rx_stat_grmax_hi;
+	u32	rx_stat_grpkt_lo;
+	u32	rx_stat_grpkt_hi;
+	u32	rx_stat_grfcs_lo;
+	u32	rx_stat_grfcs_hi;
+	u32	rx_stat_gruca_lo;
+	u32	rx_stat_gruca_hi;
+	u32	rx_stat_grmca_lo;
+	u32	rx_stat_grmca_hi;
+	u32	rx_stat_grbca_lo;
+	u32	rx_stat_grbca_hi;
+	u32	rx_stat_grxpf_lo; /* grpf */
+	u32	rx_stat_grxpf_hi; /* grpf */
+	u32	rx_stat_grpp_lo;
+	u32	rx_stat_grpp_hi;
+	u32	rx_stat_grxuo_lo; /* gruo */
+	u32	rx_stat_grxuo_hi; /* gruo */
+	u32	rx_stat_grjbr_lo;
+	u32	rx_stat_grjbr_hi;
+	u32	rx_stat_grovr_lo;
+	u32	rx_stat_grovr_hi;
+	u32	rx_stat_grxcf_lo; /* grcf */
+	u32	rx_stat_grxcf_hi; /* grcf */
+	u32	rx_stat_grflr_lo;
+	u32	rx_stat_grflr_hi;
+	u32	rx_stat_grpok_lo;
+	u32	rx_stat_grpok_hi;
+	u32	rx_stat_grmeg_lo;
+	u32	rx_stat_grmeg_hi;
+	u32	rx_stat_grmeb_lo;
+	u32	rx_stat_grmeb_hi;
+	u32	rx_stat_grbyt_lo;
+	u32	rx_stat_grbyt_hi;
+	u32	rx_stat_grund_lo;
+	u32	rx_stat_grund_hi;
+	u32	rx_stat_grfrg_lo;
+	u32	rx_stat_grfrg_hi;
+	u32	rx_stat_grerb_lo; /* grerrbyt */
+	u32	rx_stat_grerb_hi; /* grerrbyt */
+	u32	rx_stat_grfre_lo; /* grfrerr */
+	u32	rx_stat_grfre_hi; /* grfrerr */
+	u32	rx_stat_gripj_lo;
+	u32	rx_stat_gripj_hi;
+};
+
+struct mstat_stats {
+	struct {
+		/* NOTE: MSTAT on E3 has a bug where this register's contents
+		 * are actually tx_gtxpok + tx_gtxpf + (possibly) tx_gtxpp
+		 */
+		u32 tx_gtxpok_lo;
+		u32 tx_gtxpok_hi;
+		u32 tx_gtxpf_lo;
+		u32 tx_gtxpf_hi;
+		u32 tx_gtxpp_lo;
+		u32 tx_gtxpp_hi;
+		u32 tx_gtfcs_lo;
+		u32 tx_gtfcs_hi;
+		u32 tx_gtuca_lo;
+		u32 tx_gtuca_hi;
+		u32 tx_gtmca_lo;
+		u32 tx_gtmca_hi;
+		u32 tx_gtgca_lo;
+		u32 tx_gtgca_hi;
+		u32 tx_gtpkt_lo;
+		u32 tx_gtpkt_hi;
+		u32 tx_gt64_lo;
+		u32 tx_gt64_hi;
+		u32 tx_gt127_lo;
+		u32 tx_gt127_hi;
+		u32 tx_gt255_lo;
+		u32 tx_gt255_hi;
+		u32 tx_gt511_lo;
+		u32 tx_gt511_hi;
+		u32 tx_gt1023_lo;
+		u32 tx_gt1023_hi;
+		u32 tx_gt1518_lo;
+		u32 tx_gt1518_hi;
+		u32 tx_gt2047_lo;
+		u32 tx_gt2047_hi;
+		u32 tx_gt4095_lo;
+		u32 tx_gt4095_hi;
+		u32 tx_gt9216_lo;
+		u32 tx_gt9216_hi;
+		u32 tx_gt16383_lo;
+		u32 tx_gt16383_hi;
+		u32 tx_gtufl_lo;
+		u32 tx_gtufl_hi;
+		u32 tx_gterr_lo;
+		u32 tx_gterr_hi;
+		u32 tx_gtbyt_lo;
+		u32 tx_gtbyt_hi;
+		u32 tx_collisions_lo;
+		u32 tx_collisions_hi;
+		u32 tx_singlecollision_lo;
+		u32 tx_singlecollision_hi;
+		u32 tx_multiplecollisions_lo;
+		u32 tx_multiplecollisions_hi;
+		u32 tx_deferred_lo;
+		u32 tx_deferred_hi;
+		u32 tx_excessivecollisions_lo;
+		u32 tx_excessivecollisions_hi;
+		u32 tx_latecollisions_lo;
+		u32 tx_latecollisions_hi;
+	} stats_tx;
+
+	struct {
+		u32 rx_gr64_lo;
+		u32 rx_gr64_hi;
+		u32 rx_gr127_lo;
+		u32 rx_gr127_hi;
+		u32 rx_gr255_lo;
+		u32 rx_gr255_hi;
+		u32 rx_gr511_lo;
+		u32 rx_gr511_hi;
+		u32 rx_gr1023_lo;
+		u32 rx_gr1023_hi;
+		u32 rx_gr1518_lo;
+		u32 rx_gr1518_hi;
+		u32 rx_gr2047_lo;
+		u32 rx_gr2047_hi;
+		u32 rx_gr4095_lo;
+		u32 rx_gr4095_hi;
+		u32 rx_gr9216_lo;
+		u32 rx_gr9216_hi;
+		u32 rx_gr16383_lo;
+		u32 rx_gr16383_hi;
+		u32 rx_grpkt_lo;
+		u32 rx_grpkt_hi;
+		u32 rx_grfcs_lo;
+		u32 rx_grfcs_hi;
+		u32 rx_gruca_lo;
+		u32 rx_gruca_hi;
+		u32 rx_grmca_lo;
+		u32 rx_grmca_hi;
+		u32 rx_grbca_lo;
+		u32 rx_grbca_hi;
+		u32 rx_grxpf_lo;
+		u32 rx_grxpf_hi;
+		u32 rx_grxpp_lo;
+		u32 rx_grxpp_hi;
+		u32 rx_grxuo_lo;
+		u32 rx_grxuo_hi;
+		u32 rx_grovr_lo;
+		u32 rx_grovr_hi;
+		u32 rx_grxcf_lo;
+		u32 rx_grxcf_hi;
+		u32 rx_grflr_lo;
+		u32 rx_grflr_hi;
+		u32 rx_grpok_lo;
+		u32 rx_grpok_hi;
+		u32 rx_grbyt_lo;
+		u32 rx_grbyt_hi;
+		u32 rx_grund_lo;
+		u32 rx_grund_hi;
+		u32 rx_grfrg_lo;
+		u32 rx_grfrg_hi;
+		u32 rx_grerb_lo;
+		u32 rx_grerb_hi;
+		u32 rx_grfre_lo;
+		u32 rx_grfre_hi;
+
+		u32 rx_alignmenterrors_lo;
+		u32 rx_alignmenterrors_hi;
+		u32 rx_falsecarrier_lo;
+		u32 rx_falsecarrier_hi;
+		u32 rx_llfcmsgcnt_lo;
+		u32 rx_llfcmsgcnt_hi;
+	} stats_rx;
+};
+
+union mac_stats {
+	struct emac_stats	emac_stats;
+	struct bmac1_stats	bmac1_stats;
+	struct bmac2_stats	bmac2_stats;
+	struct mstat_stats	mstat_stats;
+};
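+
+/*
+ * Editorial note: each *_hi/*_lo pair in the stats structures above is one
+ * 64-bit counter split across two dwords. An illustrative (not HSI-defined)
+ * way to combine a pair:
+ *
+ *	u64 bytes = ((u64)stats->tx_stat_gtbyt_hi << 32) |
+ *		    stats->tx_stat_gtbyt_lo;
+ */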
+
+
+struct mac_stx {
+	/* in_bad_octets */
+	u32     rx_stat_ifhcinbadoctets_hi;
+	u32     rx_stat_ifhcinbadoctets_lo;
+
+	/* out_bad_octets */
+	u32     tx_stat_ifhcoutbadoctets_hi;
+	u32     tx_stat_ifhcoutbadoctets_lo;
+
+	/* crc_receive_errors */
+	u32     rx_stat_dot3statsfcserrors_hi;
+	u32     rx_stat_dot3statsfcserrors_lo;
+	/* alignment_errors */
+	u32     rx_stat_dot3statsalignmenterrors_hi;
+	u32     rx_stat_dot3statsalignmenterrors_lo;
+	/* carrier_sense_errors */
+	u32     rx_stat_dot3statscarriersenseerrors_hi;
+	u32     rx_stat_dot3statscarriersenseerrors_lo;
+	/* false_carrier_detections */
+	u32     rx_stat_falsecarriererrors_hi;
+	u32     rx_stat_falsecarriererrors_lo;
+
+	/* runt_packets_received */
+	u32     rx_stat_etherstatsundersizepkts_hi;
+	u32     rx_stat_etherstatsundersizepkts_lo;
+	/* jabber_packets_received */
+	u32     rx_stat_dot3statsframestoolong_hi;
+	u32     rx_stat_dot3statsframestoolong_lo;
+
+	/* error_runt_packets_received */
+	u32     rx_stat_etherstatsfragments_hi;
+	u32     rx_stat_etherstatsfragments_lo;
+	/* error_jabber_packets_received */
+	u32     rx_stat_etherstatsjabbers_hi;
+	u32     rx_stat_etherstatsjabbers_lo;
+
+	/* control_frames_received */
+	u32     rx_stat_maccontrolframesreceived_hi;
+	u32     rx_stat_maccontrolframesreceived_lo;
+	u32     rx_stat_mac_xpf_hi;
+	u32     rx_stat_mac_xpf_lo;
+	u32     rx_stat_mac_xcf_hi;
+	u32     rx_stat_mac_xcf_lo;
+
+	/* xoff_state_entered */
+	u32     rx_stat_xoffstateentered_hi;
+	u32     rx_stat_xoffstateentered_lo;
+	/* pause_xon_frames_received */
+	u32     rx_stat_xonpauseframesreceived_hi;
+	u32     rx_stat_xonpauseframesreceived_lo;
+	/* pause_xoff_frames_received */
+	u32     rx_stat_xoffpauseframesreceived_hi;
+	u32     rx_stat_xoffpauseframesreceived_lo;
+	/* pause_xon_frames_transmitted */
+	u32     tx_stat_outxonsent_hi;
+	u32     tx_stat_outxonsent_lo;
+	/* pause_xoff_frames_transmitted */
+	u32     tx_stat_outxoffsent_hi;
+	u32     tx_stat_outxoffsent_lo;
+	/* flow_control_done */
+	u32     tx_stat_flowcontroldone_hi;
+	u32     tx_stat_flowcontroldone_lo;
+
+	/* ether_stats_collisions */
+	u32     tx_stat_etherstatscollisions_hi;
+	u32     tx_stat_etherstatscollisions_lo;
+	/* single_collision_transmit_frames */
+	u32     tx_stat_dot3statssinglecollisionframes_hi;
+	u32     tx_stat_dot3statssinglecollisionframes_lo;
+	/* multiple_collision_transmit_frames */
+	u32     tx_stat_dot3statsmultiplecollisionframes_hi;
+	u32     tx_stat_dot3statsmultiplecollisionframes_lo;
+	/* deferred_transmissions */
+	u32     tx_stat_dot3statsdeferredtransmissions_hi;
+	u32     tx_stat_dot3statsdeferredtransmissions_lo;
+	/* excessive_collision_frames */
+	u32     tx_stat_dot3statsexcessivecollisions_hi;
+	u32     tx_stat_dot3statsexcessivecollisions_lo;
+	/* late_collision_frames */
+	u32     tx_stat_dot3statslatecollisions_hi;
+	u32     tx_stat_dot3statslatecollisions_lo;
+
+	/* frames_transmitted_64_bytes */
+	u32     tx_stat_etherstatspkts64octets_hi;
+	u32     tx_stat_etherstatspkts64octets_lo;
+	/* frames_transmitted_65_127_bytes */
+	u32     tx_stat_etherstatspkts65octetsto127octets_hi;
+	u32     tx_stat_etherstatspkts65octetsto127octets_lo;
+	/* frames_transmitted_128_255_bytes */
+	u32     tx_stat_etherstatspkts128octetsto255octets_hi;
+	u32     tx_stat_etherstatspkts128octetsto255octets_lo;
+	/* frames_transmitted_256_511_bytes */
+	u32     tx_stat_etherstatspkts256octetsto511octets_hi;
+	u32     tx_stat_etherstatspkts256octetsto511octets_lo;
+	/* frames_transmitted_512_1023_bytes */
+	u32     tx_stat_etherstatspkts512octetsto1023octets_hi;
+	u32     tx_stat_etherstatspkts512octetsto1023octets_lo;
+	/* frames_transmitted_1024_1522_bytes */
+	u32     tx_stat_etherstatspkts1024octetsto1522octets_hi;
+	u32     tx_stat_etherstatspkts1024octetsto1522octets_lo;
+	/* frames_transmitted_1523_9022_bytes */
+	u32     tx_stat_etherstatspktsover1522octets_hi;
+	u32     tx_stat_etherstatspktsover1522octets_lo;
+	u32     tx_stat_mac_2047_hi;
+	u32     tx_stat_mac_2047_lo;
+	u32     tx_stat_mac_4095_hi;
+	u32     tx_stat_mac_4095_lo;
+	u32     tx_stat_mac_9216_hi;
+	u32     tx_stat_mac_9216_lo;
+	u32     tx_stat_mac_16383_hi;
+	u32     tx_stat_mac_16383_lo;
+
+	/* internal_mac_transmit_errors */
+	u32     tx_stat_dot3statsinternalmactransmiterrors_hi;
+	u32     tx_stat_dot3statsinternalmactransmiterrors_lo;
+
+	/* if_out_discards */
+	u32     tx_stat_mac_ufl_hi;
+	u32     tx_stat_mac_ufl_lo;
+};
+
+
+#define MAC_STX_IDX_MAX                     2
+
+struct host_port_stats {
+	u32            host_port_stats_start;
+
+	struct mac_stx mac_stx[MAC_STX_IDX_MAX];
+
+	u32            brb_drop_hi;
+	u32            brb_drop_lo;
+
+	u32            host_port_stats_end;
+};
+
+
+struct host_func_stats {
+	u32     host_func_stats_start;
+
+	u32     total_bytes_received_hi;
+	u32     total_bytes_received_lo;
+
+	u32     total_bytes_transmitted_hi;
+	u32     total_bytes_transmitted_lo;
+
+	u32     total_unicast_packets_received_hi;
+	u32     total_unicast_packets_received_lo;
+
+	u32     total_multicast_packets_received_hi;
+	u32     total_multicast_packets_received_lo;
+
+	u32     total_broadcast_packets_received_hi;
+	u32     total_broadcast_packets_received_lo;
+
+	u32     total_unicast_packets_transmitted_hi;
+	u32     total_unicast_packets_transmitted_lo;
+
+	u32     total_multicast_packets_transmitted_hi;
+	u32     total_multicast_packets_transmitted_lo;
+
+	u32     total_broadcast_packets_transmitted_hi;
+	u32     total_broadcast_packets_transmitted_lo;
+
+	u32     valid_bytes_received_hi;
+	u32     valid_bytes_received_lo;
+
+	u32     host_func_stats_end;
+};
+
+/* VIC definitions */
+#define VICSTATST_UIF_INDEX 2
+
+#define BCM_5710_FW_MAJOR_VERSION			7
+#define BCM_5710_FW_MINOR_VERSION			0
+#define BCM_5710_FW_REVISION_VERSION		23
+#define BCM_5710_FW_ENGINEERING_VERSION		0
+#define BCM_5710_FW_COMPILE_FLAGS			1
+
+
+/*
+ * attention bits
+ */
+struct atten_sp_status_block {
+	__le32 attn_bits;
+	__le32 attn_bits_ack;
+	u8 status_block_id;
+	u8 reserved0;
+	__le16 attn_bits_index;
+	__le32 reserved1;
+};
+
+
+/*
+ * The eth aggregative context of Cstorm
+ */
+struct cstorm_eth_ag_context {
+	u32 __reserved0[10];
+};
+
+
+/*
+ * dmae command structure
+ */
+struct dmae_command {
+	u32 opcode;
+#define DMAE_COMMAND_SRC (0x1<<0)
+#define DMAE_COMMAND_SRC_SHIFT 0
+#define DMAE_COMMAND_DST (0x3<<1)
+#define DMAE_COMMAND_DST_SHIFT 1
+#define DMAE_COMMAND_C_DST (0x1<<3)
+#define DMAE_COMMAND_C_DST_SHIFT 3
+#define DMAE_COMMAND_C_TYPE_ENABLE (0x1<<4)
+#define DMAE_COMMAND_C_TYPE_ENABLE_SHIFT 4
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE (0x1<<5)
+#define DMAE_COMMAND_C_TYPE_CRC_ENABLE_SHIFT 5
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET (0x7<<6)
+#define DMAE_COMMAND_C_TYPE_CRC_OFFSET_SHIFT 6
+#define DMAE_COMMAND_ENDIANITY (0x3<<9)
+#define DMAE_COMMAND_ENDIANITY_SHIFT 9
+#define DMAE_COMMAND_PORT (0x1<<11)
+#define DMAE_COMMAND_PORT_SHIFT 11
+#define DMAE_COMMAND_CRC_RESET (0x1<<12)
+#define DMAE_COMMAND_CRC_RESET_SHIFT 12
+#define DMAE_COMMAND_SRC_RESET (0x1<<13)
+#define DMAE_COMMAND_SRC_RESET_SHIFT 13
+#define DMAE_COMMAND_DST_RESET (0x1<<14)
+#define DMAE_COMMAND_DST_RESET_SHIFT 14
+#define DMAE_COMMAND_E1HVN (0x3<<15)
+#define DMAE_COMMAND_E1HVN_SHIFT 15
+#define DMAE_COMMAND_DST_VN (0x3<<17)
+#define DMAE_COMMAND_DST_VN_SHIFT 17
+#define DMAE_COMMAND_C_FUNC (0x1<<19)
+#define DMAE_COMMAND_C_FUNC_SHIFT 19
+#define DMAE_COMMAND_ERR_POLICY (0x3<<20)
+#define DMAE_COMMAND_ERR_POLICY_SHIFT 20
+#define DMAE_COMMAND_RESERVED0 (0x3FF<<22)
+#define DMAE_COMMAND_RESERVED0_SHIFT 22
+	u32 src_addr_lo;
+	u32 src_addr_hi;
+	u32 dst_addr_lo;
+	u32 dst_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+	u16 len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 len;
+	u16 opcode_iov;
+#define DMAE_COMMAND_SRC_VFID (0x3F<<0)
+#define DMAE_COMMAND_SRC_VFID_SHIFT 0
+#define DMAE_COMMAND_SRC_VFPF (0x1<<6)
+#define DMAE_COMMAND_SRC_VFPF_SHIFT 6
+#define DMAE_COMMAND_RESERVED1 (0x1<<7)
+#define DMAE_COMMAND_RESERVED1_SHIFT 7
+#define DMAE_COMMAND_DST_VFID (0x3F<<8)
+#define DMAE_COMMAND_DST_VFID_SHIFT 8
+#define DMAE_COMMAND_DST_VFPF (0x1<<14)
+#define DMAE_COMMAND_DST_VFPF_SHIFT 14
+#define DMAE_COMMAND_RESERVED2 (0x1<<15)
+#define DMAE_COMMAND_RESERVED2_SHIFT 15
+#endif
+	u32 comp_addr_lo;
+	u32 comp_addr_hi;
+	u32 comp_val;
+	u32 crc32;
+	u32 crc32_c;
+#if defined(__BIG_ENDIAN)
+	u16 crc16_c;
+	u16 crc16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 crc16;
+	u16 crc16_c;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u16 crc_t10;
+#elif defined(__LITTLE_ENDIAN)
+	u16 crc_t10;
+	u16 reserved3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 xsum8;
+	u16 xsum16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 xsum16;
+	u16 xsum8;
+#endif
+};
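+
+/*
+ * Illustrative opcode composition (editorial sketch, not part of the HSI):
+ * each field is written as (value << FIELD_SHIFT), where the value must fit
+ * under the FIELD mask, e.g.
+ *
+ *	u32 opcode = (1 << DMAE_COMMAND_SRC_SHIFT) |
+ *		     (2 << DMAE_COMMAND_DST_SHIFT) |
+ *		     (1 << DMAE_COMMAND_C_DST_SHIFT);
+ */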
+
+
+/*
+ * common data for all protocols
+ */
+struct doorbell_hdr {
+	u8 header;
+#define DOORBELL_HDR_RX (0x1<<0)
+#define DOORBELL_HDR_RX_SHIFT 0
+#define DOORBELL_HDR_DB_TYPE (0x1<<1)
+#define DOORBELL_HDR_DB_TYPE_SHIFT 1
+#define DOORBELL_HDR_DPM_SIZE (0x3<<2)
+#define DOORBELL_HDR_DPM_SIZE_SHIFT 2
+#define DOORBELL_HDR_CONN_TYPE (0xF<<4)
+#define DOORBELL_HDR_CONN_TYPE_SHIFT 4
+};
+
+/*
+ * Ethernet doorbell
+ */
+struct eth_tx_doorbell {
+#if defined(__BIG_ENDIAN)
+	u16 npackets;
+	u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+	struct doorbell_hdr hdr;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr hdr;
+	u8 params;
+#define ETH_TX_DOORBELL_NUM_BDS (0x3F<<0)
+#define ETH_TX_DOORBELL_NUM_BDS_SHIFT 0
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG (0x1<<6)
+#define ETH_TX_DOORBELL_RESERVED_TX_FIN_FLAG_SHIFT 6
+#define ETH_TX_DOORBELL_SPARE (0x1<<7)
+#define ETH_TX_DOORBELL_SPARE_SHIFT 7
+	u16 npackets;
+#endif
+};
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e1x {
+	__le16 index_values[HC_SB_MAX_INDICES_E1X];
+	__le16 running_index[HC_SB_MAX_SM];
+	__le32 rsrv[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e1x {
+	struct hc_status_block_e1x sb;
+};
+
+
+/*
+ * 3 lines. status block
+ */
+struct hc_status_block_e2 {
+	__le16 index_values[HC_SB_MAX_INDICES_E2];
+	__le16 running_index[HC_SB_MAX_SM];
+	__le32 reserved[11];
+};
+
+/*
+ * host status block
+ */
+struct host_hc_status_block_e2 {
+	struct hc_status_block_e2 sb;
+};
+
+
+/*
+ * 5 lines. slow-path status block
+ */
+struct hc_sp_status_block {
+	__le16 index_values[HC_SP_SB_MAX_INDICES];
+	__le16 running_index;
+	__le16 rsrv;
+	u32 rsrv1;
+};
+
+/*
+ * host status block
+ */
+struct host_sp_status_block {
+	struct atten_sp_status_block atten_status_block;
+	struct hc_sp_status_block sp_sb;
+};
+
+
+/*
+ * IGU driver acknowledgment register
+ */
+struct igu_ack_register {
+#if defined(__BIG_ENDIAN)
+	u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+	u16 status_block_index;
+#elif defined(__LITTLE_ENDIAN)
+	u16 status_block_index;
+	u16 sb_id_and_flags;
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID (0x1F<<0)
+#define IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT 0
+#define IGU_ACK_REGISTER_STORM_ID (0x7<<5)
+#define IGU_ACK_REGISTER_STORM_ID_SHIFT 5
+#define IGU_ACK_REGISTER_UPDATE_INDEX (0x1<<8)
+#define IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT 8
+#define IGU_ACK_REGISTER_INTERRUPT_MODE (0x3<<9)
+#define IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT 9
+#define IGU_ACK_REGISTER_RESERVED (0x1F<<11)
+#define IGU_ACK_REGISTER_RESERVED_SHIFT 11
+#endif
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_backward_compatible {
+	u32 sb_id_and_flags;
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX (0xFFFF<<0)
+#define IGU_BACKWARD_COMPATIBLE_SB_INDEX_SHIFT 0
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT (0x1F<<16)
+#define IGU_BACKWARD_COMPATIBLE_SB_SELECT_SHIFT 16
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS (0x7<<21)
+#define IGU_BACKWARD_COMPATIBLE_SEGMENT_ACCESS_SHIFT 21
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE (0x1<<24)
+#define IGU_BACKWARD_COMPATIBLE_BUPDATE_SHIFT 24
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT (0x3<<25)
+#define IGU_BACKWARD_COMPATIBLE_ENABLE_INT_SHIFT 25
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0 (0x1F<<27)
+#define IGU_BACKWARD_COMPATIBLE_RESERVED_0_SHIFT 27
+	u32 reserved_2;
+};
+
+
+/*
+ * IGU driver acknowledgement register
+ */
+struct igu_regular {
+	u32 sb_id_and_flags;
+#define IGU_REGULAR_SB_INDEX (0xFFFFF<<0)
+#define IGU_REGULAR_SB_INDEX_SHIFT 0
+#define IGU_REGULAR_RESERVED0 (0x1<<20)
+#define IGU_REGULAR_RESERVED0_SHIFT 20
+#define IGU_REGULAR_SEGMENT_ACCESS (0x7<<21)
+#define IGU_REGULAR_SEGMENT_ACCESS_SHIFT 21
+#define IGU_REGULAR_BUPDATE (0x1<<24)
+#define IGU_REGULAR_BUPDATE_SHIFT 24
+#define IGU_REGULAR_ENABLE_INT (0x3<<25)
+#define IGU_REGULAR_ENABLE_INT_SHIFT 25
+#define IGU_REGULAR_RESERVED_1 (0x1<<27)
+#define IGU_REGULAR_RESERVED_1_SHIFT 27
+#define IGU_REGULAR_CLEANUP_TYPE (0x3<<28)
+#define IGU_REGULAR_CLEANUP_TYPE_SHIFT 28
+#define IGU_REGULAR_CLEANUP_SET (0x1<<30)
+#define IGU_REGULAR_CLEANUP_SET_SHIFT 30
+#define IGU_REGULAR_BCLEANUP (0x1<<31)
+#define IGU_REGULAR_BCLEANUP_SHIFT 31
+	u32 reserved_2;
+};
+
+/*
+ * IGU driver acknowledgement register
+ */
+union igu_consprod_reg {
+	struct igu_regular regular;
+	struct igu_backward_compatible backward_compatible;
+};
+
+
+/*
+ * Igu control commands
+ */
+enum igu_ctrl_cmd {
+	IGU_CTRL_CMD_TYPE_RD,
+	IGU_CTRL_CMD_TYPE_WR,
+	MAX_IGU_CTRL_CMD
+};
+
+
+/*
+ * Control register for the IGU command register
+ */
+struct igu_ctrl_reg {
+	u32 ctrl_data;
+#define IGU_CTRL_REG_ADDRESS (0xFFF<<0)
+#define IGU_CTRL_REG_ADDRESS_SHIFT 0
+#define IGU_CTRL_REG_FID (0x7F<<12)
+#define IGU_CTRL_REG_FID_SHIFT 12
+#define IGU_CTRL_REG_RESERVED (0x1<<19)
+#define IGU_CTRL_REG_RESERVED_SHIFT 19
+#define IGU_CTRL_REG_TYPE (0x1<<20)
+#define IGU_CTRL_REG_TYPE_SHIFT 20
+#define IGU_CTRL_REG_UNUSED (0x7FF<<21)
+#define IGU_CTRL_REG_UNUSED_SHIFT 21
+};
+
+
+/*
+ * Igu interrupt command
+ */
+enum igu_int_cmd {
+	IGU_INT_ENABLE,
+	IGU_INT_DISABLE,
+	IGU_INT_NOP,
+	IGU_INT_NOP2,
+	MAX_IGU_INT_CMD
+};
+
+
+/*
+ * Igu segments
+ */
+enum igu_seg_access {
+	IGU_SEG_ACCESS_NORM,
+	IGU_SEG_ACCESS_DEF,
+	IGU_SEG_ACCESS_ATTN,
+	MAX_IGU_SEG_ACCESS
+};
+
+
+/*
+ * Parser parsing flags field
+ */
+struct parsing_flags {
+	__le16 flags;
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE (0x1<<0)
+#define PARSING_FLAGS_ETHERNET_ADDRESS_TYPE_SHIFT 0
+#define PARSING_FLAGS_VLAN (0x1<<1)
+#define PARSING_FLAGS_VLAN_SHIFT 1
+#define PARSING_FLAGS_EXTRA_VLAN (0x1<<2)
+#define PARSING_FLAGS_EXTRA_VLAN_SHIFT 2
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL (0x3<<3)
+#define PARSING_FLAGS_OVER_ETHERNET_PROTOCOL_SHIFT 3
+#define PARSING_FLAGS_IP_OPTIONS (0x1<<5)
+#define PARSING_FLAGS_IP_OPTIONS_SHIFT 5
+#define PARSING_FLAGS_FRAGMENTATION_STATUS (0x1<<6)
+#define PARSING_FLAGS_FRAGMENTATION_STATUS_SHIFT 6
+#define PARSING_FLAGS_OVER_IP_PROTOCOL (0x3<<7)
+#define PARSING_FLAGS_OVER_IP_PROTOCOL_SHIFT 7
+#define PARSING_FLAGS_PURE_ACK_INDICATION (0x1<<9)
+#define PARSING_FLAGS_PURE_ACK_INDICATION_SHIFT 9
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST (0x1<<10)
+#define PARSING_FLAGS_TCP_OPTIONS_EXIST_SHIFT 10
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG (0x1<<11)
+#define PARSING_FLAGS_TIME_STAMP_EXIST_FLAG_SHIFT 11
+#define PARSING_FLAGS_CONNECTION_MATCH (0x1<<12)
+#define PARSING_FLAGS_CONNECTION_MATCH_SHIFT 12
+#define PARSING_FLAGS_LLC_SNAP (0x1<<13)
+#define PARSING_FLAGS_LLC_SNAP_SHIFT 13
+#define PARSING_FLAGS_RESERVED0 (0x3<<14)
+#define PARSING_FLAGS_RESERVED0_SHIFT 14
+};
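+
+/*
+ * Illustrative use (editorial sketch, not part of the HSI): testing whether
+ * a received packet carried a VLAN tag, given a struct parsing_flags *pf:
+ *
+ *	if (le16_to_cpu(pf->flags) & PARSING_FLAGS_VLAN)
+ *		the packet was VLAN tagged
+ */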
+
+
+/*
+ * Parsing flags for TCP ACK type
+ */
+enum prs_flags_ack_type {
+	PRS_FLAG_PUREACK_PIGGY,
+	PRS_FLAG_PUREACK_PURE,
+	MAX_PRS_FLAGS_ACK_TYPE
+};
+
+
+/*
+ * Parsing flags for Ethernet address type
+ */
+enum prs_flags_eth_addr_type {
+	PRS_FLAG_ETHTYPE_NON_UNICAST,
+	PRS_FLAG_ETHTYPE_UNICAST,
+	MAX_PRS_FLAGS_ETH_ADDR_TYPE
+};
+
+
+/*
+ * Parsing flags for over-ethernet protocol
+ */
+enum prs_flags_over_eth {
+	PRS_FLAG_OVERETH_UNKNOWN,
+	PRS_FLAG_OVERETH_IPV4,
+	PRS_FLAG_OVERETH_IPV6,
+	PRS_FLAG_OVERETH_LLCSNAP_UNKNOWN,
+	MAX_PRS_FLAGS_OVER_ETH
+};
+
+
+/*
+ * Parsing flags for over-IP protocol
+ */
+enum prs_flags_over_ip {
+	PRS_FLAG_OVERIP_UNKNOWN,
+	PRS_FLAG_OVERIP_TCP,
+	PRS_FLAG_OVERIP_UDP,
+	MAX_PRS_FLAGS_OVER_IP
+};
+
+
+/*
+ * SDM operation gen command (generate aggregative interrupt)
+ */
+struct sdm_op_gen {
+	__le32 command;
+#define SDM_OP_GEN_COMP_PARAM (0x1F<<0)
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE (0x7<<5)
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 5
+#define SDM_OP_GEN_AGG_VECT_IDX (0xFF<<8)
+#define SDM_OP_GEN_AGG_VECT_IDX_SHIFT 8
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID (0x1<<16)
+#define SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT 16
+#define SDM_OP_GEN_RESERVED (0x7FFF<<17)
+#define SDM_OP_GEN_RESERVED_SHIFT 17
+};
+
+
+/*
+ * Timers connection context
+ */
+struct timers_block_context {
+	u32 __reserved_0;
+	u32 __reserved_1;
+	u32 __reserved_2;
+	u32 flags;
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
+#define __TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
+#define TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
+#define __TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
+};
+
+
+/*
+ * The eth aggregative context of Tstorm
+ */
+struct tstorm_eth_ag_context {
+	u32 __reserved0[14];
+};
+
+
+/*
+ * The eth aggregative context of Ustorm
+ */
+struct ustorm_eth_ag_context {
+	u32 __reserved0;
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 __reserved2;
+	u16 __reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __reserved1;
+	u8 __reserved2;
+	u8 cdu_usage;
+#endif
+	u32 __reserved3[6];
+};
+
+
+/*
+ * The eth aggregative context of Xstorm
+ */
+struct xstorm_eth_ag_context {
+	u32 reserved0;
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 reserved2;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u8 reserved2;
+	u8 cdu_reserved;
+#endif
+	u32 reserved3[30];
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell {
+#if defined(__BIG_ENDIAN)
+	u16 zero_fill2;
+	u8 zero_fill1;
+	struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr header;
+	u8 zero_fill1;
+	u16 zero_fill2;
+#endif
+};
+
+
+/*
+ * doorbell message sent to the chip
+ */
+struct doorbell_set_prod {
+#if defined(__BIG_ENDIAN)
+	u16 prod;
+	u8 zero_fill1;
+	struct doorbell_hdr header;
+#elif defined(__LITTLE_ENDIAN)
+	struct doorbell_hdr header;
+	u8 zero_fill1;
+	u16 prod;
+#endif
+};
+
+
+struct regpair {
+	__le32 lo;
+	__le32 hi;
+};
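+
+/*
+ * Typical use (editorial sketch, not part of the HSI): splitting a 64-bit
+ * DMA address addr into a regpair:
+ *
+ *	rp.lo = cpu_to_le32(lower_32_bits(addr));
+ *	rp.hi = cpu_to_le32(upper_32_bits(addr));
+ */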
+
+
+/*
+ * Classify rule opcodes in E2/E3
+ */
+enum classify_rule {
+	CLASSIFY_RULE_OPCODE_MAC,
+	CLASSIFY_RULE_OPCODE_VLAN,
+	CLASSIFY_RULE_OPCODE_PAIR,
+	MAX_CLASSIFY_RULE
+};
+
+
+/*
+ * Classify rule types in E2/E3
+ */
+enum classify_rule_action_type {
+	CLASSIFY_RULE_REMOVE,
+	CLASSIFY_RULE_ADD,
+	MAX_CLASSIFY_RULE_ACTION_TYPE
+};
+
+
+/*
+ * client init ramrod data
+ */
+struct client_init_general_data {
+	u8 client_id;
+	u8 statistics_counter_id;
+	u8 statistics_en_flg;
+	u8 is_fcoe_flg;
+	u8 activate_flg;
+	u8 sp_client_id;
+	__le16 mtu;
+	u8 statistics_zero_flg;
+	u8 func_id;
+	u8 cos;
+	u8 traffic_type;
+	u32 reserved0;
+};
+
+
+/*
+ * client init rx data
+ */
+struct client_init_rx_data {
+	u8 tpa_en;
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4 (0x1<<0)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
+#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
+	u8 vmqueue_mode_en_flg;
+	u8 extra_data_over_sgl_en_flg;
+	u8 cache_line_alignment_log_size;
+	u8 enable_dynamic_hc;
+	u8 max_sges_for_packet;
+	u8 client_qzone_id;
+	u8 drop_ip_cs_err_flg;
+	u8 drop_tcp_cs_err_flg;
+	u8 drop_ttl0_flg;
+	u8 drop_udp_cs_err_flg;
+	u8 inner_vlan_removal_enable_flg;
+	u8 outer_vlan_removal_enable_flg;
+	u8 status_block_id;
+	u8 rx_sb_index_number;
+	u8 reserved0;
+	u8 max_tpa_queues;
+	u8 silent_vlan_removal_flg;
+	__le16 max_bytes_on_bd;
+	__le16 sge_buff_size;
+	u8 approx_mcast_engine_id;
+	u8 rss_engine_id;
+	struct regpair bd_page_base;
+	struct regpair sge_page_base;
+	struct regpair cqe_page_base;
+	u8 is_leading_rss;
+	u8 is_approx_mcast;
+	__le16 max_agg_size;
+	__le16 state;
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL (0x1<<0)
+#define CLIENT_INIT_RX_DATA_UCAST_DROP_ALL_SHIFT 0
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define CLIENT_INIT_RX_DATA_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL (0x1<<3)
+#define CLIENT_INIT_RX_DATA_MCAST_DROP_ALL_SHIFT 3
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL (0x1<<4)
+#define CLIENT_INIT_RX_DATA_MCAST_ACCEPT_ALL_SHIFT 4
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL (0x1<<5)
+#define CLIENT_INIT_RX_DATA_BCAST_ACCEPT_ALL_SHIFT 5
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN (0x1<<6)
+#define CLIENT_INIT_RX_DATA_ACCEPT_ANY_VLAN_SHIFT 6
+#define CLIENT_INIT_RX_DATA_RESERVED2 (0x1FF<<7)
+#define CLIENT_INIT_RX_DATA_RESERVED2_SHIFT 7
+	__le16 cqe_pause_thr_low;
+	__le16 cqe_pause_thr_high;
+	__le16 bd_pause_thr_low;
+	__le16 bd_pause_thr_high;
+	__le16 sge_pause_thr_low;
+	__le16 sge_pause_thr_high;
+	__le16 rx_cos_mask;
+	__le16 silent_vlan_value;
+	__le16 silent_vlan_mask;
+	__le32 reserved6[2];
+};
+
+/*
+ * client init tx data
+ */
+struct client_init_tx_data {
+	u8 enforce_security_flg;
+	u8 tx_status_block_id;
+	u8 tx_sb_index_number;
+	u8 tss_leading_client_id;
+	u8 tx_switching_flg;
+	u8 anti_spoofing_flg;
+	__le16 default_vlan;
+	struct regpair tx_bd_page_base;
+	__le16 state;
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL (0x1<<0)
+#define CLIENT_INIT_TX_DATA_UCAST_ACCEPT_ALL_SHIFT 0
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL (0x1<<1)
+#define CLIENT_INIT_TX_DATA_MCAST_ACCEPT_ALL_SHIFT 1
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL (0x1<<2)
+#define CLIENT_INIT_TX_DATA_BCAST_ACCEPT_ALL_SHIFT 2
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN (0x1<<3)
+#define CLIENT_INIT_TX_DATA_ACCEPT_ANY_VLAN_SHIFT 3
+#define CLIENT_INIT_TX_DATA_RESERVED1 (0xFFF<<4)
+#define CLIENT_INIT_TX_DATA_RESERVED1_SHIFT 4
+	u8 default_vlan_flg;
+	u8 reserved2;
+	__le32 reserved3;
+};
+
+/*
+ * client init ramrod data
+ */
+struct client_init_ramrod_data {
+	struct client_init_general_data general;
+	struct client_init_rx_data rx;
+	struct client_init_tx_data tx;
+};
+
+
+/*
+ * client update ramrod data
+ */
+struct client_update_ramrod_data {
+	u8 client_id;
+	u8 func_id;
+	u8 inner_vlan_removal_enable_flg;
+	u8 inner_vlan_removal_change_flg;
+	u8 outer_vlan_removal_enable_flg;
+	u8 outer_vlan_removal_change_flg;
+	u8 anti_spoofing_enable_flg;
+	u8 anti_spoofing_change_flg;
+	u8 activate_flg;
+	u8 activate_change_flg;
+	__le16 default_vlan;
+	u8 default_vlan_enable_flg;
+	u8 default_vlan_change_flg;
+	__le16 silent_vlan_value;
+	__le16 silent_vlan_mask;
+	u8 silent_vlan_removal_flg;
+	u8 silent_vlan_change_flg;
+	__le32 echo;
+};
+
+
+/*
+ * The eth storm context of Cstorm
+ */
+struct cstorm_eth_st_context {
+	u32 __reserved0[4];
+};
+
+
+struct double_regpair {
+	u32 regpair0_lo;
+	u32 regpair0_hi;
+	u32 regpair1_lo;
+	u32 regpair1_hi;
+};
+
+
+/*
+ * Ethernet address types, used in ethernet tx BDs
+ */
+enum eth_addr_type {
+	UNKNOWN_ADDRESS,
+	UNICAST_ADDRESS,
+	MULTICAST_ADDRESS,
+	BROADCAST_ADDRESS,
+	MAX_ETH_ADDR_TYPE
+};
+
+
+/*
+ * eth classification command header
+ */
+struct eth_classify_cmd_header {
+	u8 cmd_general_data;
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD (0x1<<0)
+#define ETH_CLASSIFY_CMD_HEADER_RX_CMD_SHIFT 0
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD (0x1<<1)
+#define ETH_CLASSIFY_CMD_HEADER_TX_CMD_SHIFT 1
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE (0x3<<2)
+#define ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT 2
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD (0x1<<4)
+#define ETH_CLASSIFY_CMD_HEADER_IS_ADD_SHIFT 4
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0 (0x7<<5)
+#define ETH_CLASSIFY_CMD_HEADER_RESERVED0_SHIFT 5
+	u8 func_id;
+	u8 client_id;
+	u8 reserved1;
+};
+
+
+/*
+ * header for eth classification config ramrod
+ */
+struct eth_classify_header {
+	u8 rule_cnt;
+	u8 reserved0;
+	__le16 reserved1;
+	__le32 echo;
+};
+
+
+/*
+ * Command for adding/removing a MAC classification rule
+ */
+struct eth_classify_mac_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 reserved0;
+	__le16 mac_lsb;
+	__le16 mac_mid;
+	__le16 mac_msb;
+	__le16 reserved1;
+};
+
+
+/*
+ * Command for adding/removing a MAC-VLAN pair classification rule
+ */
+struct eth_classify_pair_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 reserved0;
+	__le16 mac_lsb;
+	__le16 mac_mid;
+	__le16 mac_msb;
+	__le16 vlan;
+};
+
+
+/*
+ * Command for adding/removing a VLAN classification rule
+ */
+struct eth_classify_vlan_cmd {
+	struct eth_classify_cmd_header header;
+	__le32 reserved0;
+	__le32 reserved1;
+	__le16 reserved2;
+	__le16 vlan;
+};
+
+/*
+ * union for eth classification rule
+ */
+union eth_classify_rule_cmd {
+	struct eth_classify_mac_cmd mac;
+	struct eth_classify_vlan_cmd vlan;
+	struct eth_classify_pair_cmd pair;
+};
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_classify_rules_ramrod_data {
+	struct eth_classify_header header;
+	union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data contains the client ID needed by the ramrod
+ */
+struct eth_common_ramrod_data {
+	__le32 client_id;
+	__le32 reserved1;
+};
+
+
+/*
+ * The eth storm context of Ustorm
+ */
+struct ustorm_eth_st_context {
+	u32 reserved0[52];
+};
+
+/*
+ * The eth storm context of Tstorm
+ */
+struct tstorm_eth_st_context {
+	u32 __reserved0[28];
+};
+
+/*
+ * The eth storm context of Xstorm
+ */
+struct xstorm_eth_st_context {
+	u32 reserved0[60];
+};
+
+/*
+ * Ethernet connection context
+ */
+struct eth_context {
+	struct ustorm_eth_st_context ustorm_st_context;
+	struct tstorm_eth_st_context tstorm_st_context;
+	struct xstorm_eth_ag_context xstorm_ag_context;
+	struct tstorm_eth_ag_context tstorm_ag_context;
+	struct cstorm_eth_ag_context cstorm_ag_context;
+	struct ustorm_eth_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct xstorm_eth_st_context xstorm_st_context;
+	struct cstorm_eth_st_context cstorm_st_context;
+};
+
+
+/*
+ * union for sgl and raw data.
+ */
+union eth_sgl_or_raw_data {
+	__le16 sgl[8];
+	u32 raw_data[4];
+};
+
+/*
+ * eth FP end aggregation CQE parameters struct
+ */
+struct eth_end_agg_rx_cqe {
+	u8 type_error_flags;
+#define ETH_END_AGG_RX_CQE_TYPE (0x3<<0)
+#define ETH_END_AGG_RX_CQE_TYPE_SHIFT 0
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_END_AGG_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_END_AGG_RX_CQE_RESERVED0 (0x1F<<3)
+#define ETH_END_AGG_RX_CQE_RESERVED0_SHIFT 3
+	u8 reserved1;
+	u8 queue_index;
+	u8 reserved2;
+	__le32 timestamp_delta;
+	__le16 num_of_coalesced_segs;
+	__le16 pkt_len;
+	u8 pure_ack_count;
+	u8 reserved3;
+	__le16 reserved4;
+	union eth_sgl_or_raw_data sgl_or_raw_data;
+	__le32 reserved5[8];
+};
+
+
+/*
+ * regular eth FP CQE parameters struct
+ */
+struct eth_fast_path_rx_cqe {
+	u8 type_error_flags;
+#define ETH_FAST_PATH_RX_CQE_TYPE (0x3<<0)
+#define ETH_FAST_PATH_RX_CQE_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL (0x1<<2)
+#define ETH_FAST_PATH_RX_CQE_SGL_RAW_SEL_SHIFT 2
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_RESERVED0 (0x3<<6)
+#define ETH_FAST_PATH_RX_CQE_RESERVED0_SHIFT 6
+	u8 status_flags;
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE (0x7<<0)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE_SHIFT 0
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG (0x1<<3)
+#define ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG_SHIFT 3
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG (0x1<<4)
+#define ETH_FAST_PATH_RX_CQE_BROADCAST_FLG_SHIFT 4
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG (0x1<<5)
+#define ETH_FAST_PATH_RX_CQE_MAC_MATCH_FLG_SHIFT 5
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG (0x1<<6)
+#define ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG_SHIFT 6
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG (0x1<<7)
+#define ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG_SHIFT 7
+	u8 queue_index;
+	u8 placement_offset;
+	__le32 rss_hash_result;
+	__le16 vlan_tag;
+	__le16 pkt_len;
+	__le16 len_on_bd;
+	struct parsing_flags pars_flags;
+	union eth_sgl_or_raw_data sgl_or_raw_data;
+	__le32 reserved1[8];
+};
+
+
+/*
+ * Command for setting classification flags for a client
+ */
+struct eth_filter_rules_cmd {
+	u8 cmd_general_data;
+#define ETH_FILTER_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_FILTER_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_FILTER_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_FILTER_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_FILTER_RULES_CMD_RESERVED0 (0x3F<<2)
+#define ETH_FILTER_RULES_CMD_RESERVED0_SHIFT 2
+	u8 func_id;
+	u8 client_id;
+	u8 reserved1;
+	__le16 state;
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL (0x1<<0)
+#define ETH_FILTER_RULES_CMD_UCAST_DROP_ALL_SHIFT 0
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL (0x1<<1)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED (0x1<<2)
+#define ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL (0x1<<3)
+#define ETH_FILTER_RULES_CMD_MCAST_DROP_ALL_SHIFT 3
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL (0x1<<4)
+#define ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL (0x1<<5)
+#define ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN (0x1<<6)
+#define ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN_SHIFT 6
+#define ETH_FILTER_RULES_CMD_RESERVED2 (0x1FF<<7)
+#define ETH_FILTER_RULES_CMD_RESERVED2_SHIFT 7
+	__le16 reserved3;
+	struct regpair reserved4;
+};
+
+
+/*
+ * parameters for eth classification filters ramrod
+ */
+struct eth_filter_rules_ramrod_data {
+	struct eth_classify_header header;
+	struct eth_filter_rules_cmd rules[FILTER_RULES_COUNT];
+};
+
+
+/*
+ * parameters for eth classification configuration ramrod
+ */
+struct eth_general_rules_ramrod_data {
+	struct eth_classify_header header;
+	union eth_classify_rule_cmd rules[CLASSIFY_RULES_COUNT];
+};
+
+
+/*
+ * The data for Halt ramrod
+ */
+struct eth_halt_ramrod_data {
+	__le32 client_id;
+	__le32 reserved0;
+};
+
+
+/*
+ * Command for setting multicast classification for a client
+ */
+struct eth_multicast_rules_cmd {
+	u8 cmd_general_data;
+#define ETH_MULTICAST_RULES_CMD_RX_CMD (0x1<<0)
+#define ETH_MULTICAST_RULES_CMD_RX_CMD_SHIFT 0
+#define ETH_MULTICAST_RULES_CMD_TX_CMD (0x1<<1)
+#define ETH_MULTICAST_RULES_CMD_TX_CMD_SHIFT 1
+#define ETH_MULTICAST_RULES_CMD_IS_ADD (0x1<<2)
+#define ETH_MULTICAST_RULES_CMD_IS_ADD_SHIFT 2
+#define ETH_MULTICAST_RULES_CMD_RESERVED0 (0x1F<<3)
+#define ETH_MULTICAST_RULES_CMD_RESERVED0_SHIFT 3
+	u8 func_id;
+	u8 bin_id;
+	u8 engine_id;
+	__le32 reserved2;
+	struct regpair reserved3;
+};
+
+
+/*
+ * parameters for multicast classification ramrod
+ */
+struct eth_multicast_rules_ramrod_data {
+	struct eth_classify_header header;
+	struct eth_multicast_rules_cmd rules[MULTICAST_RULES_COUNT];
+};
+
+
+/*
+ * Placeholder for a ramrod's protocol-specific data
+ */
+struct ramrod_data {
+	__le32 data_lo;
+	__le32 data_hi;
+};
+
+/*
+ * union for ramrod data for Ethernet protocol (CQE) (force size of 16 bits)
+ */
+union eth_ramrod_data {
+	struct ramrod_data general;
+};
+
+
+/*
+ * RSS toeplitz hash type, as reported in CQE
+ */
+enum eth_rss_hash_type {
+	DEFAULT_HASH_TYPE,
+	IPV4_HASH_TYPE,
+	TCP_IPV4_HASH_TYPE,
+	IPV6_HASH_TYPE,
+	TCP_IPV6_HASH_TYPE,
+	VLAN_PRI_HASH_TYPE,
+	E1HOV_PRI_HASH_TYPE,
+	DSCP_HASH_TYPE,
+	MAX_ETH_RSS_HASH_TYPE
+};
+
+
+/*
+ * Ethernet RSS mode
+ */
+enum eth_rss_mode {
+	ETH_RSS_MODE_DISABLED,
+	ETH_RSS_MODE_REGULAR,
+	ETH_RSS_MODE_VLAN_PRI,
+	ETH_RSS_MODE_E1HOV_PRI,
+	ETH_RSS_MODE_IP_DSCP,
+	MAX_ETH_RSS_MODE
+};
+
+
+/*
+ * parameters for RSS update ramrod (E2)
+ */
+struct eth_rss_update_ramrod_data {
+	u8 rss_engine_id;
+	u8 capabilities;
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY (0x1<<0)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY_SHIFT 0
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY (0x1<<1)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY_SHIFT 1
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY (0x1<<2)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY_SHIFT 2
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY (0x1<<3)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY_SHIFT 3
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY (0x1<<4)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY_SHIFT 4
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY (0x1<<5)
+#define ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY (0x1<<6)
+#define ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY_SHIFT 6
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0 (0x1<<7)
+#define __ETH_RSS_UPDATE_RAMROD_DATA_RESERVED0_SHIFT 7
+	u8 rss_result_mask;
+	u8 rss_mode;
+	__le32 __reserved2;
+	u8 indirection_table[T_ETH_INDIRECTION_TABLE_SIZE];
+	__le32 rss_key[T_ETH_RSS_KEY];
+	__le32 echo;
+	__le32 reserved3;
+};
+
+
+/*
+ * The eth Rx Buffer Descriptor
+ */
+struct eth_rx_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+};
+
+
+/*
+ * Eth Rx CQE structure - general structure for ramrods
+ */
+struct common_ramrod_eth_rx_cqe {
+	u8 ramrod_type;
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x3<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 2
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x1F<<3)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 3
+	u8 conn_type;
+	__le16 reserved1;
+	__le32 conn_and_cmd_data;
+#define COMMON_RAMROD_ETH_RX_CQE_CID (0xFFFFFF<<0)
+#define COMMON_RAMROD_ETH_RX_CQE_CID_SHIFT 0
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID (0xFF<<24)
+#define COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT 24
+	struct ramrod_data protocol_data;
+	__le32 echo;
+	__le32 reserved2[11];
+};
+
+/*
+ * Rx Last CQE in page (in ETH)
+ */
+struct eth_rx_cqe_next_page {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le32 reserved[14];
+};
+
+/*
+ * union for all eth rx cqe types (fixes their sizes)
+ */
+union eth_rx_cqe {
+	struct eth_fast_path_rx_cqe fast_path_cqe;
+	struct common_ramrod_eth_rx_cqe ramrod_cqe;
+	struct eth_rx_cqe_next_page next_page_cqe;
+	struct eth_end_agg_rx_cqe end_agg_cqe;
+};
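+/* The union relies on all of its members having the same size, which keeps
+ * the CQE footprint fixed regardless of the CQE type. A driver-side sanity
+ * check (illustrative sketch only, not part of the HSI) could be:
+ *
+ *	BUILD_BUG_ON(sizeof(struct eth_rx_cqe_next_page) !=
+ *		     sizeof(union eth_rx_cqe));
+ */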
+
+
+/*
+ * Values for RX ETH CQE type field
+ */
+enum eth_rx_cqe_type {
+	RX_ETH_CQE_TYPE_ETH_FASTPATH,
+	RX_ETH_CQE_TYPE_ETH_RAMROD,
+	RX_ETH_CQE_TYPE_ETH_START_AGG,
+	RX_ETH_CQE_TYPE_ETH_STOP_AGG,
+	MAX_ETH_RX_CQE_TYPE
+};
+
+
+/*
+ * Type of SGL/Raw field in ETH RX fast path CQE
+ */
+enum eth_rx_fp_sel {
+	ETH_FP_CQE_REGULAR,
+	ETH_FP_CQE_RAW,
+	MAX_ETH_RX_FP_SEL
+};
+
+
+/*
+ * The eth Rx SGE Descriptor
+ */
+struct eth_rx_sge {
+	__le32 addr_lo;
+	__le32 addr_hi;
+};
+
+
+/*
+ * common data for all protocols
+ */
+struct spe_hdr {
+	__le32 conn_and_cmd_data;
+#define SPE_HDR_CID (0xFFFFFF<<0)
+#define SPE_HDR_CID_SHIFT 0
+#define SPE_HDR_CMD_ID (0xFF<<24)
+#define SPE_HDR_CMD_ID_SHIFT 24
+	__le16 type;
+#define SPE_HDR_CONN_TYPE (0xFF<<0)
+#define SPE_HDR_CONN_TYPE_SHIFT 0
+#define SPE_HDR_FUNCTION_ID (0xFF<<8)
+#define SPE_HDR_FUNCTION_ID_SHIFT 8
+	__le16 reserved1;
+};
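+/* The sub-fields of conn_and_cmd_data and type are packed via the MASK/SHIFT
+ * pairs above. A hedged example of composing the header (cid and cmd_id are
+ * hypothetical local variables):
+ *
+ *	spe->hdr.conn_and_cmd_data =
+ *		cpu_to_le32(((cid << SPE_HDR_CID_SHIFT) & SPE_HDR_CID) |
+ *			    (cmd_id << SPE_HDR_CMD_ID_SHIFT));
+ */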
+
+/*
+ * specific data for ethernet slow path element
+ */
+union eth_specific_data {
+	u8 protocol_data[8];
+	struct regpair client_update_ramrod_data;
+	struct regpair client_init_ramrod_init_data;
+	struct eth_halt_ramrod_data halt_ramrod_data;
+	struct regpair update_data_addr;
+	struct eth_common_ramrod_data common_ramrod_data;
+	struct regpair classify_cfg_addr;
+	struct regpair filter_cfg_addr;
+	struct regpair mcast_cfg_addr;
+};
+
+/*
+ * Ethernet slow path element
+ */
+struct eth_spe {
+	struct spe_hdr hdr;
+	union eth_specific_data data;
+};
+
+
+/*
+ * Ethernet command ID for slow path elements
+ */
+enum eth_spqe_cmd_id {
+	RAMROD_CMD_ID_ETH_UNUSED,
+	RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+	RAMROD_CMD_ID_ETH_HALT,
+	RAMROD_CMD_ID_ETH_FORWARD_SETUP,
+	RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP,
+	RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+	RAMROD_CMD_ID_ETH_EMPTY,
+	RAMROD_CMD_ID_ETH_TERMINATE,
+	RAMROD_CMD_ID_ETH_TPA_UPDATE,
+	RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES,
+	RAMROD_CMD_ID_ETH_FILTER_RULES,
+	RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+	RAMROD_CMD_ID_ETH_RSS_UPDATE,
+	RAMROD_CMD_ID_ETH_SET_MAC,
+	MAX_ETH_SPQE_CMD_ID
+};
+
+
+/*
+ * eth tpa update command
+ */
+enum eth_tpa_update_command {
+	TPA_UPDATE_NONE_COMMAND,
+	TPA_UPDATE_ENABLE_COMMAND,
+	TPA_UPDATE_DISABLE_COMMAND,
+	MAX_ETH_TPA_UPDATE_COMMAND
+};
+
+
+/*
+ * Tx regular BD structure
+ */
+struct eth_tx_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le16 total_pkt_bytes;
+	__le16 nbytes;
+	u8 reserved[4];
+};
+
+
+/*
+ * structure for easy access from assembler code
+ */
+struct eth_tx_bd_flags {
+	u8 as_bitfield;
+#define ETH_TX_BD_FLAGS_IP_CSUM (0x1<<0)
+#define ETH_TX_BD_FLAGS_IP_CSUM_SHIFT 0
+#define ETH_TX_BD_FLAGS_L4_CSUM (0x1<<1)
+#define ETH_TX_BD_FLAGS_L4_CSUM_SHIFT 1
+#define ETH_TX_BD_FLAGS_VLAN_MODE (0x3<<2)
+#define ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT 2
+#define ETH_TX_BD_FLAGS_START_BD (0x1<<4)
+#define ETH_TX_BD_FLAGS_START_BD_SHIFT 4
+#define ETH_TX_BD_FLAGS_IS_UDP (0x1<<5)
+#define ETH_TX_BD_FLAGS_IS_UDP_SHIFT 5
+#define ETH_TX_BD_FLAGS_SW_LSO (0x1<<6)
+#define ETH_TX_BD_FLAGS_SW_LSO_SHIFT 6
+#define ETH_TX_BD_FLAGS_IPV6 (0x1<<7)
+#define ETH_TX_BD_FLAGS_IPV6_SHIFT 7
+};
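+/* Flag bits are accessed through the MASK/SHIFT pairs above, e.g. (sketch
+ * only, 'bd' is a hypothetical struct eth_tx_bd_flags):
+ *
+ *	bd.as_bitfield |= ETH_TX_BD_FLAGS_START_BD;
+ *	vlan_mode = (bd.as_bitfield & ETH_TX_BD_FLAGS_VLAN_MODE) >>
+ *		    ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT;
+ */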
+
+/*
+ * The eth Tx Buffer Descriptor
+ */
+struct eth_tx_start_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	__le16 nbd;
+	__le16 nbytes;
+	__le16 vlan_or_ethertype;
+	struct eth_tx_bd_flags bd_flags;
+	u8 general_data;
+#define ETH_TX_START_BD_HDR_NBDS (0xF<<0)
+#define ETH_TX_START_BD_HDR_NBDS_SHIFT 0
+#define ETH_TX_START_BD_FORCE_VLAN_MODE (0x1<<4)
+#define ETH_TX_START_BD_FORCE_VLAN_MODE_SHIFT 4
+#define ETH_TX_START_BD_RESREVED (0x1<<5)
+#define ETH_TX_START_BD_RESREVED_SHIFT 5
+#define ETH_TX_START_BD_ETH_ADDR_TYPE (0x3<<6)
+#define ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT 6
+};
+
+/*
+ * Tx parsing BD structure for ETH E1/E1h
+ */
+struct eth_tx_parse_bd_e1x {
+	u8 global_data;
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W (0xF<<0)
+#define ETH_TX_PARSE_BD_E1X_IP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_RESERVED0 (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_RESERVED0_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_NS_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_NS_FLG_SHIFT 7
+	u8 tcp_flags;
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG (0x1<<0)
+#define ETH_TX_PARSE_BD_E1X_FIN_FLG_SHIFT 0
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG (0x1<<1)
+#define ETH_TX_PARSE_BD_E1X_SYN_FLG_SHIFT 1
+#define ETH_TX_PARSE_BD_E1X_RST_FLG (0x1<<2)
+#define ETH_TX_PARSE_BD_E1X_RST_FLG_SHIFT 2
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG (0x1<<3)
+#define ETH_TX_PARSE_BD_E1X_PSH_FLG_SHIFT 3
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG (0x1<<4)
+#define ETH_TX_PARSE_BD_E1X_ACK_FLG_SHIFT 4
+#define ETH_TX_PARSE_BD_E1X_URG_FLG (0x1<<5)
+#define ETH_TX_PARSE_BD_E1X_URG_FLG_SHIFT 5
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG (0x1<<6)
+#define ETH_TX_PARSE_BD_E1X_ECE_FLG_SHIFT 6
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG (0x1<<7)
+#define ETH_TX_PARSE_BD_E1X_CWR_FLG_SHIFT 7
+	u8 ip_hlen_w;
+	s8 reserved;
+	__le16 total_hlen_w;
+	__le16 tcp_pseudo_csum;
+	__le16 lso_mss;
+	__le16 ip_id;
+	__le32 tcp_send_seq;
+};
+
+/*
+ * Tx parsing BD structure for ETH E2
+ */
+struct eth_tx_parse_bd_e2 {
+	__le16 dst_mac_addr_lo;
+	__le16 dst_mac_addr_mid;
+	__le16 dst_mac_addr_hi;
+	__le16 src_mac_addr_lo;
+	__le16 src_mac_addr_mid;
+	__le16 src_mac_addr_hi;
+	__le32 parsing_data;
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W (0x1FFF<<0)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT 0
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW (0xF<<13)
+#define ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT 13
+#define ETH_TX_PARSE_BD_E2_LSO_MSS (0x3FFF<<17)
+#define ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT 17
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR (0x1<<31)
+#define ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR_SHIFT 31
+};
+
+/*
+ * The last BD in the BD memory will hold a pointer to the next BD memory
+ */
+struct eth_tx_next_bd {
+	__le32 addr_lo;
+	__le32 addr_hi;
+	u8 reserved[8];
+};
+
+/*
+ * union for all Tx BD types
+ */
+union eth_tx_bd_types {
+	struct eth_tx_start_bd start_bd;
+	struct eth_tx_bd reg_bd;
+	struct eth_tx_parse_bd_e1x parse_bd_e1x;
+	struct eth_tx_parse_bd_e2 parse_bd_e2;
+	struct eth_tx_next_bd next_bd;
+};
+
+/*
+ * array of 13 BDs as it appears in the eth xstorm context
+ */
+struct eth_tx_bds_array {
+	union eth_tx_bd_types bds[13];
+};
+
+
+/*
+ * VLAN mode on TX BDs
+ */
+enum eth_tx_vlan_type {
+	X_ETH_NO_VLAN,
+	X_ETH_OUTBAND_VLAN,
+	X_ETH_INBAND_VLAN,
+	X_ETH_FW_ADDED_VLAN,
+	MAX_ETH_TX_VLAN_TYPE
+};
+
+
+/*
+ * Ethernet VLAN filtering mode in E1x
+ */
+enum eth_vlan_filter_mode {
+	ETH_VLAN_FILTER_ANY_VLAN,
+	ETH_VLAN_FILTER_SPECIFIC_VLAN,
+	ETH_VLAN_FILTER_CLASSIFY,
+	MAX_ETH_VLAN_FILTER_MODE
+};
+
+
+/*
+ * MAC filtering configuration command header
+ */
+struct mac_configuration_hdr {
+	u8 length;
+	u8 offset;
+	__le16 client_id;
+	__le32 echo;
+};
+
+/*
+ * MAC address in list for ramrod
+ */
+struct mac_configuration_entry {
+	__le16 lsb_mac_addr;
+	__le16 middle_mac_addr;
+	__le16 msb_mac_addr;
+	__le16 vlan_id;
+	u8 pf_id;
+	u8 flags;
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE (0x1<<0)
+#define MAC_CONFIGURATION_ENTRY_ACTION_TYPE_SHIFT 0
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC (0x1<<1)
+#define MAC_CONFIGURATION_ENTRY_RDMA_MAC_SHIFT 1
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE (0x3<<2)
+#define MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE_SHIFT 2
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL (0x1<<4)
+#define MAC_CONFIGURATION_ENTRY_OVERRIDE_VLAN_REMOVAL_SHIFT 4
+#define MAC_CONFIGURATION_ENTRY_BROADCAST (0x1<<5)
+#define MAC_CONFIGURATION_ENTRY_BROADCAST_SHIFT 5
+#define MAC_CONFIGURATION_ENTRY_RESERVED1 (0x3<<6)
+#define MAC_CONFIGURATION_ENTRY_RESERVED1_SHIFT 6
+	__le16 reserved0;
+	__le32 clients_bit_vector;
+};
+
+/*
+ * MAC filtering configuration command
+ */
+struct mac_configuration_cmd {
+	struct mac_configuration_hdr hdr;
+	struct mac_configuration_entry config_table[64];
+};
+
+
+/*
+ * Set-MAC command type (in E1x)
+ */
+enum set_mac_action_type {
+	T_ETH_MAC_COMMAND_INVALIDATE,
+	T_ETH_MAC_COMMAND_SET,
+	MAX_SET_MAC_ACTION_TYPE
+};
+
+
+/*
+ * tpa update ramrod data
+ */
+struct tpa_update_ramrod_data {
+	u8 update_ipv4;
+	u8 update_ipv6;
+	u8 client_id;
+	u8 max_tpa_queues;
+	u8 max_sges_for_packet;
+	u8 complete_on_both_clients;
+	__le16 reserved1;
+	__le16 sge_buff_size;
+	__le16 max_agg_size;
+	__le32 sge_page_base_lo;
+	__le32 sge_page_base_hi;
+	__le16 sge_pause_thr_low;
+	__le16 sge_pause_thr_high;
+};
+
+
+/*
+ * approximate-match multicast filtering for E1H per function in Tstorm
+ */
+struct tstorm_eth_approximate_match_multicast_filtering {
+	u32 mcast_add_hash_bit_array[8];
+};
+
+
+/*
+ * Common configuration parameters per function in Tstorm
+ */
+struct tstorm_eth_function_common_config {
+	__le16 config_flags;
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY (0x1<<0)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY_SHIFT 0
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY (0x1<<1)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY_SHIFT 1
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY (0x1<<2)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY_SHIFT 2
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY (0x1<<3)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY_SHIFT 3
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE (0x7<<4)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT 4
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE (0x1<<7)
+#define TSTORM_ETH_FUNCTION_COMMON_CONFIG_VLAN_FILTERING_ENABLE_SHIFT 7
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0 (0xFF<<8)
+#define __TSTORM_ETH_FUNCTION_COMMON_CONFIG_RESERVED0_SHIFT 8
+	u8 rss_result_mask;
+	u8 reserved1;
+	__le16 vlan_id[2];
+};
+
+
+/*
+ * MAC filtering configuration parameters per port in Tstorm
+ */
+struct tstorm_eth_mac_filter_config {
+	__le32 ucast_drop_all;
+	__le32 ucast_accept_all;
+	__le32 mcast_drop_all;
+	__le32 mcast_accept_all;
+	__le32 bcast_accept_all;
+	__le32 vlan_filter[2];
+	__le32 unmatched_unicast;
+};
+
+
+/*
+ * tx only queue init ramrod data
+ */
+struct tx_queue_init_ramrod_data {
+	struct client_init_general_data general;
+	struct client_init_tx_data tx;
+};
+
+
+/*
+ * Three RX producers for ETH
+ */
+struct ustorm_eth_rx_producers {
+#if defined(__BIG_ENDIAN)
+	u16 bd_prod;
+	u16 cqe_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cqe_prod;
+	u16 bd_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved;
+	u16 sge_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sge_prod;
+	u16 reserved;
+#endif
+};
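+/* The #if blocks above reorder each pair of u16 fields so that both host
+ * endiannesses produce the same little-endian 32-bit memory image that the
+ * chip expects.
+ */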
+
+
+/*
+ * cfc delete event data
+ */
+struct cfc_del_event_data {
+	u32 cid;
+	u32 reserved0;
+	u32 reserved1;
+};
+
+
+/*
+ * per-port SAFC demo variables
+ */
+struct cmng_flags_per_port {
+	u32 cmng_enables;
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN (0x1<<0)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_VN_SHIFT 0
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN (0x1<<1)
+#define CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN_SHIFT 1
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS (0x1<<2)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_SHIFT 2
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE (0x1<<3)
+#define CMNG_FLAGS_PER_PORT_FAIRNESS_COS_MODE_SHIFT 3
+#define __CMNG_FLAGS_PER_PORT_RESERVED0 (0xFFFFFFF<<4)
+#define __CMNG_FLAGS_PER_PORT_RESERVED0_SHIFT 4
+	u32 __reserved1;
+};
+
+
+/*
+ * per-port rate shaping variables
+ */
+struct rate_shaping_vars_per_port {
+	u32 rs_periodic_timeout;
+	u32 rs_threshold;
+};
+
+/*
+ * per-port fairness variables
+ */
+struct fairness_vars_per_port {
+	u32 upper_bound;
+	u32 fair_threshold;
+	u32 fairness_timeout;
+	u32 reserved0;
+};
+
+/*
+ * per-port SAFC variables
+ */
+struct safc_struct_per_port {
+#if defined(__BIG_ENDIAN)
+	u16 __reserved1;
+	u8 __reserved0;
+	u8 safc_timeout_usec;
+#elif defined(__LITTLE_ENDIAN)
+	u8 safc_timeout_usec;
+	u8 __reserved0;
+	u16 __reserved1;
+#endif
+	u8 cos_to_traffic_types[MAX_COS_NUMBER];
+	u16 cos_to_pause_mask[NUM_OF_SAFC_BITS];
+};
+
+/*
+ * Per-port congestion management variables
+ */
+struct cmng_struct_per_port {
+	struct rate_shaping_vars_per_port rs_vars;
+	struct fairness_vars_per_port fair_vars;
+	struct safc_struct_per_port safc_vars;
+	struct cmng_flags_per_port flags;
+};
+
+
+/*
+ * Protocol-common command ID for slow path elements
+ */
+enum common_spqe_cmd_id {
+	RAMROD_CMD_ID_COMMON_UNUSED,
+	RAMROD_CMD_ID_COMMON_FUNCTION_START,
+	RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+	RAMROD_CMD_ID_COMMON_CFC_DEL,
+	RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
+	RAMROD_CMD_ID_COMMON_STAT_QUERY,
+	RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
+	RAMROD_CMD_ID_COMMON_START_TRAFFIC,
+	RAMROD_CMD_ID_COMMON_RESERVED1,
+	RAMROD_CMD_ID_COMMON_RESERVED2,
+	MAX_COMMON_SPQE_CMD_ID
+};
+
+
+/*
+ * Per-protocol connection types
+ */
+enum connection_type {
+	ETH_CONNECTION_TYPE,
+	TOE_CONNECTION_TYPE,
+	RDMA_CONNECTION_TYPE,
+	ISCSI_CONNECTION_TYPE,
+	FCOE_CONNECTION_TYPE,
+	RESERVED_CONNECTION_TYPE_0,
+	RESERVED_CONNECTION_TYPE_1,
+	RESERVED_CONNECTION_TYPE_2,
+	NONE_CONNECTION_TYPE,
+	MAX_CONNECTION_TYPE
+};
+
+
+/*
+ * Cos modes
+ */
+enum cos_mode {
+	OVERRIDE_COS,
+	STATIC_COS,
+	FW_WRR,
+	MAX_COS_MODE
+};
+
+
+/*
+ * Dynamic HC counters set by the driver
+ */
+struct hc_dynamic_drv_counter {
+	u32 val[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * zone A per-queue data
+ */
+struct cstorm_queue_zone_data {
+	struct hc_dynamic_drv_counter hc_dyn_drv_cnt;
+	struct regpair reserved[2];
+};
+
+
+/*
+ * VF-PF channel data in cstorm RAM (non-triggered zone)
+ */
+struct vf_pf_channel_zone_data {
+	u32 msg_addr_lo;
+	u32 msg_addr_hi;
+};
+
+/*
+ * zone for VF non-triggered data
+ */
+struct non_trigger_vf_zone {
+	struct vf_pf_channel_zone_data vf_pf_channel;
+};
+
+/*
+ * VF-PF channel trigger zone in cstorm RAM
+ */
+struct vf_pf_channel_zone_trigger {
+	u8 addr_valid;
+};
+
+/*
+ * zone that triggers the in-bound interrupt
+ */
+struct trigger_vf_zone {
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u8 reserved0;
+	struct vf_pf_channel_zone_trigger vf_pf_channel;
+#elif defined(__LITTLE_ENDIAN)
+	struct vf_pf_channel_zone_trigger vf_pf_channel;
+	u8 reserved0;
+	u16 reserved1;
+#endif
+	u32 reserved2;
+};
+
+/*
+ * zone B per-VF data
+ */
+struct cstorm_vf_zone_data {
+	struct non_trigger_vf_zone non_trigger;
+	struct trigger_vf_zone trigger;
+};
+
+
+/*
+ * Dynamic host coalescing init parameters, per state machine
+ */
+struct dynamic_hc_sm_config {
+	u32 threshold[3];
+	u8 shift_per_protocol[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout0[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout1[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout2[HC_SB_MAX_DYNAMIC_INDICES];
+	u8 hc_timeout3[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+/*
+ * Dynamic host coalescing init parameters
+ */
+struct dynamic_hc_config {
+	struct dynamic_hc_sm_config sm_config[HC_SB_MAX_SM];
+};
+
+
+struct e2_integ_data {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+	u8 cos;
+	u8 voq;
+	u8 pbf_queue;
+#elif defined(__LITTLE_ENDIAN)
+	u8 pbf_queue;
+	u8 voq;
+	u8 cos;
+	u8 flags;
+#define E2_INTEG_DATA_TESTING_EN (0x1<<0)
+#define E2_INTEG_DATA_TESTING_EN_SHIFT 0
+#define E2_INTEG_DATA_LB_TX (0x1<<1)
+#define E2_INTEG_DATA_LB_TX_SHIFT 1
+#define E2_INTEG_DATA_COS_TX (0x1<<2)
+#define E2_INTEG_DATA_COS_TX_SHIFT 2
+#define E2_INTEG_DATA_OPPORTUNISTICQM (0x1<<3)
+#define E2_INTEG_DATA_OPPORTUNISTICQM_SHIFT 3
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ (0x1<<4)
+#define E2_INTEG_DATA_DPMTESTRELEASEDQ_SHIFT 4
+#define E2_INTEG_DATA_RESERVED (0x7<<5)
+#define E2_INTEG_DATA_RESERVED_SHIFT 5
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 reserved2;
+	u8 ramEn;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ramEn;
+	u8 reserved2;
+	u16 reserved3;
+#endif
+};
+
+
+/*
+ * set mac event data
+ */
+struct eth_event_data {
+	u32 echo;
+	u32 reserved0;
+	u32 reserved1;
+};
+
+
+/*
+ * pf-vf event data
+ */
+struct vf_pf_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
+	u32 msg_addr_lo;
+	u32 msg_addr_hi;
+};
+
+/*
+ * VF FLR event data
+ */
+struct vf_flr_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
+	u32 reserved2;
+	u32 reserved3;
+};
+
+/*
+ * malicious VF event data
+ */
+struct malicious_vf_event_data {
+	u8 vf_id;
+	u8 reserved0;
+	u16 reserved1;
+	u32 reserved2;
+	u32 reserved3;
+};
+
+/*
+ * union for all event ring message types
+ */
+union event_data {
+	struct vf_pf_event_data vf_pf_event;
+	struct eth_event_data eth_event;
+	struct cfc_del_event_data cfc_del_event;
+	struct vf_flr_event_data vf_flr_event;
+	struct malicious_vf_event_data malicious_vf_event;
+};
+
+
+/*
+ * per PF event ring data
+ */
+struct event_ring_data {
+	struct regpair base_addr;
+#if defined(__BIG_ENDIAN)
+	u8 index_id;
+	u8 sb_id;
+	u16 producer;
+#elif defined(__LITTLE_ENDIAN)
+	u16 producer;
+	u8 sb_id;
+	u8 index_id;
+#endif
+	u32 reserved0;
+};
+
+
+/*
+ * event ring message element (each element is 128 bits)
+ */
+struct event_ring_msg {
+	u8 opcode;
+	u8 error;
+	u16 reserved1;
+	union event_data data;
+};
+
+/*
+ * event ring next page element (128 bits)
+ */
+struct event_ring_next {
+	struct regpair addr;
+	u32 reserved[2];
+};
+
+/*
+ * union for event ring element types (each element is 128 bits)
+ */
+union event_ring_elem {
+	struct event_ring_msg message;
+	struct event_ring_next next_page;
+};
+
+
+/*
+ * Common event ring opcodes
+ */
+enum event_ring_opcode {
+	EVENT_RING_OPCODE_VF_PF_CHANNEL,
+	EVENT_RING_OPCODE_FUNCTION_START,
+	EVENT_RING_OPCODE_FUNCTION_STOP,
+	EVENT_RING_OPCODE_CFC_DEL,
+	EVENT_RING_OPCODE_CFC_DEL_WB,
+	EVENT_RING_OPCODE_STAT_QUERY,
+	EVENT_RING_OPCODE_STOP_TRAFFIC,
+	EVENT_RING_OPCODE_START_TRAFFIC,
+	EVENT_RING_OPCODE_VF_FLR,
+	EVENT_RING_OPCODE_MALICIOUS_VF,
+	EVENT_RING_OPCODE_FORWARD_SETUP,
+	EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+	EVENT_RING_OPCODE_RESERVED1,
+	EVENT_RING_OPCODE_RESERVED2,
+	EVENT_RING_OPCODE_SET_MAC,
+	EVENT_RING_OPCODE_CLASSIFICATION_RULES,
+	EVENT_RING_OPCODE_FILTERS_RULES,
+	EVENT_RING_OPCODE_MULTICAST_RULES,
+	MAX_EVENT_RING_OPCODE
+};
+
+
+/*
+ * Modes for fairness algorithm
+ */
+enum fairness_mode {
+	FAIRNESS_COS_WRR_MODE,
+	FAIRNESS_COS_ETS_MODE,
+	MAX_FAIRNESS_MODE
+};
+
+
+/*
+ * per-vnic fairness variables
+ */
+struct fairness_vars_per_vn {
+	u32 cos_credit_delta[MAX_COS_NUMBER];
+	u32 vn_credit_delta;
+	u32 __reserved0;
+};
+
+
+/*
+ * Priority and cos
+ */
+struct priority_cos {
+	u8 priority;
+	u8 cos;
+	__le16 reserved1;
+};
+
+/*
+ * The data for flow control configuration
+ */
+struct flow_control_configuration {
+	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+	u8 dcb_enabled;
+	u8 dcb_version;
+	u8 dont_add_pri_0_en;
+	u8 reserved1;
+	__le32 reserved2;
+};
+
+
+/*
+ * The data for the function start ramrod
+ */
+struct function_start_data {
+	__le16 function_mode;
+	__le16 sd_vlan_tag;
+	u16 reserved;
+	u8 path_id;
+	u8 network_cos_mode;
+};
+
+
+/*
+ * FW version stored in the Xstorm RAM
+ */
+struct fw_version {
+#if defined(__BIG_ENDIAN)
+	u8 engineering;
+	u8 revision;
+	u8 minor;
+	u8 major;
+#elif defined(__LITTLE_ENDIAN)
+	u8 major;
+	u8 minor;
+	u8 revision;
+	u8 engineering;
+#endif
+	u32 flags;
+#define FW_VERSION_OPTIMIZED (0x1<<0)
+#define FW_VERSION_OPTIMIZED_SHIFT 0
+#define FW_VERSION_BIG_ENDIEN (0x1<<1)
+#define FW_VERSION_BIG_ENDIEN_SHIFT 1
+#define FW_VERSION_CHIP_VERSION (0x3<<2)
+#define FW_VERSION_CHIP_VERSION_SHIFT 2
+#define __FW_VERSION_RESERVED (0xFFFFFFF<<4)
+#define __FW_VERSION_RESERVED_SHIFT 4
+};
+
+
+/*
+ * Dynamic Host-Coalescing - Driver(host) counters
+ */
+struct hc_dynamic_sb_drv_counters {
+	u32 dynamic_hc_drv_counter[HC_SB_MAX_DYNAMIC_INDICES];
+};
+
+
+/*
+ * Two bytes of configuration/state parameters for a single protocol index
+ */
+struct hc_index_data {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+	u8 timeout;
+#elif defined(__LITTLE_ENDIAN)
+	u8 timeout;
+	u8 flags;
+#define HC_INDEX_DATA_SM_ID (0x1<<0)
+#define HC_INDEX_DATA_SM_ID_SHIFT 0
+#define HC_INDEX_DATA_HC_ENABLED (0x1<<1)
+#define HC_INDEX_DATA_HC_ENABLED_SHIFT 1
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED (0x1<<2)
+#define HC_INDEX_DATA_DYNAMIC_HC_ENABLED_SHIFT 2
+#define HC_INDEX_DATA_RESERVE (0x1F<<3)
+#define HC_INDEX_DATA_RESERVE_SHIFT 3
+#endif
+};
+
+
+/*
+ * HC state-machine
+ */
+struct hc_status_block_sm {
+#if defined(__BIG_ENDIAN)
+	u8 igu_seg_id;
+	u8 igu_sb_id;
+	u8 timer_value;
+	u8 __flags;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __flags;
+	u8 timer_value;
+	u8 igu_sb_id;
+	u8 igu_seg_id;
+#endif
+	u32 time_to_expire;
+};
+
+/*
+ * holds PCI identification variables - used in various places in the firmware
+ */
+struct pci_entity {
+#if defined(__BIG_ENDIAN)
+	u8 vf_valid;
+	u8 vf_id;
+	u8 vnic_id;
+	u8 pf_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 pf_id;
+	u8 vnic_id;
+	u8 vf_id;
+	u8 vf_valid;
+#endif
+};
+
+/*
+ * The fast-path status block meta-data, common to all chips
+ */
+struct hc_sb_data {
+	struct regpair host_sb_addr;
+	struct hc_status_block_sm state_machine[HC_SB_MAX_SM];
+	struct pci_entity p_func;
+#if defined(__BIG_ENDIAN)
+	u8 rsrv0;
+	u8 state;
+	u8 dhc_qzone_id;
+	u8 same_igu_sb_1b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 same_igu_sb_1b;
+	u8 dhc_qzone_id;
+	u8 state;
+	u8 rsrv0;
+#endif
+	struct regpair rsrv1[2];
+};
+
+
+/*
+ * Segment types for host coalescing
+ */
+enum hc_segment {
+	HC_REGULAR_SEGMENT,
+	HC_DEFAULT_SEGMENT,
+	MAX_HC_SEGMENT
+};
+
+
+/*
+ * The slow-path status block meta-data
+ */
+struct hc_sp_status_block_data {
+	struct regpair host_sb_addr;
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1;
+	u8 state;
+	u8 igu_seg_id;
+	u8 igu_sb_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 igu_sb_id;
+	u8 igu_seg_id;
+	u8 state;
+	u8 rsrv1;
+#endif
+	struct pci_entity p_func;
+};
+
+
+/*
+ * The fast-path status block meta-data (E1x chips)
+ */
+struct hc_status_block_data_e1x {
+	struct hc_index_data index_data[HC_SB_MAX_INDICES_E1X];
+	struct hc_sb_data common;
+};
+
+
+/*
+ * The fast-path status block meta-data (E2 chips)
+ */
+struct hc_status_block_data_e2 {
+	struct hc_index_data index_data[HC_SB_MAX_INDICES_E2];
+	struct hc_sb_data common;
+};
+
+
+/*
+ * IGU block operation modes (in Everest2)
+ */
+enum igu_mode {
+	HC_IGU_BC_MODE,
+	HC_IGU_NBC_MODE,
+	MAX_IGU_MODE
+};
+
+
+/*
+ * IP versions
+ */
+enum ip_ver {
+	IP_V4,
+	IP_V6,
+	MAX_IP_VER
+};
+
+
+/*
+ * Multi-function modes
+ */
+enum mf_mode {
+	SINGLE_FUNCTION,
+	MULTI_FUNCTION_SD,
+	MULTI_FUNCTION_SI,
+	MULTI_FUNCTION_RESERVED,
+	MAX_MF_MODE
+};
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per pf)
+ */
+struct tstorm_per_pf_stats {
+	struct regpair rcv_error_bytes;
+};
+
+/*
+ * All protocol-common statistics collected per PF
+ */
+struct per_pf_stats {
+	struct tstorm_per_pf_stats tstorm_pf_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per port)
+ */
+struct tstorm_per_port_stats {
+	__le32 mac_discard;
+	__le32 mac_filter_discard;
+	__le32 brb_truncate_discard;
+	__le32 mf_tag_discard;
+	__le32 packet_drop;
+	__le32 reserved;
+};
+
+/*
+ * All protocol-common statistics collected per port
+ */
+struct per_port_stats {
+	struct tstorm_per_port_stats tstorm_port_statistics;
+};
+
+
+/*
+ * Protocol-common statistics collected by the Tstorm (per client)
+ */
+struct tstorm_per_queue_stats {
+	struct regpair rcv_ucast_bytes;
+	__le32 rcv_ucast_pkts;
+	__le32 checksum_discard;
+	struct regpair rcv_bcast_bytes;
+	__le32 rcv_bcast_pkts;
+	__le32 pkts_too_big_discard;
+	struct regpair rcv_mcast_bytes;
+	__le32 rcv_mcast_pkts;
+	__le32 ttl0_discard;
+	__le16 no_buff_discard;
+	__le16 reserved0;
+	__le32 reserved1;
+};
+
+/*
+ * Protocol-common statistics collected by the Ustorm (per client)
+ */
+struct ustorm_per_queue_stats {
+	struct regpair ucast_no_buff_bytes;
+	struct regpair mcast_no_buff_bytes;
+	struct regpair bcast_no_buff_bytes;
+	__le32 ucast_no_buff_pkts;
+	__le32 mcast_no_buff_pkts;
+	__le32 bcast_no_buff_pkts;
+	__le32 coalesced_pkts;
+	struct regpair coalesced_bytes;
+	__le32 coalesced_events;
+	__le32 coalesced_aborts;
+};
+
+/*
+ * Protocol-common statistics collected by the Xstorm (per client)
+ */
+struct xstorm_per_queue_stats {
+	struct regpair ucast_bytes_sent;
+	struct regpair mcast_bytes_sent;
+	struct regpair bcast_bytes_sent;
+	__le32 ucast_pkts_sent;
+	__le32 mcast_pkts_sent;
+	__le32 bcast_pkts_sent;
+	__le32 error_drop_pkts;
+};
+
+/*
+ * All protocol-common statistics collected per queue
+ */
+struct per_queue_stats {
+	struct tstorm_per_queue_stats tstorm_queue_statistics;
+	struct ustorm_per_queue_stats ustorm_queue_statistics;
+	struct xstorm_per_queue_stats xstorm_queue_statistics;
+};
+
+
+/*
+ * FW version stored in the first line of PRAM
+ */
+struct pram_fw_version {
+	u8 major;
+	u8 minor;
+	u8 revision;
+	u8 engineering;
+	u8 flags;
+#define PRAM_FW_VERSION_OPTIMIZED (0x1<<0)
+#define PRAM_FW_VERSION_OPTIMIZED_SHIFT 0
+#define PRAM_FW_VERSION_STORM_ID (0x3<<1)
+#define PRAM_FW_VERSION_STORM_ID_SHIFT 1
+#define PRAM_FW_VERSION_BIG_ENDIEN (0x1<<3)
+#define PRAM_FW_VERSION_BIG_ENDIEN_SHIFT 3
+#define PRAM_FW_VERSION_CHIP_VERSION (0x3<<4)
+#define PRAM_FW_VERSION_CHIP_VERSION_SHIFT 4
+#define __PRAM_FW_VERSION_RESERVED0 (0x3<<6)
+#define __PRAM_FW_VERSION_RESERVED0_SHIFT 6
+};
+
+
+/*
+ * specific data for protocol-common slow path element
+ */
+union protocol_common_specific_data {
+	u8 protocol_data[8];
+	struct regpair phy_address;
+	struct regpair mac_config_addr;
+};
+
+/*
+ * The send queue element
+ */
+struct protocol_common_spe {
+	struct spe_hdr hdr;
+	union protocol_common_specific_data data;
+};
+
+
+/*
+ * a single rate shaping counter; can be used as a protocol or vnic counter
+ */
+struct rate_shaping_counter {
+	u32 quota;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved0;
+	u16 rate;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rate;
+	u16 __reserved0;
+#endif
+};
+
+
+/*
+ * per-vnic rate shaping variables
+ */
+struct rate_shaping_vars_per_vn {
+	struct rate_shaping_counter vn_counter;
+};
+
+
+/*
+ * The send queue element
+ */
+struct slow_path_element {
+	struct spe_hdr hdr;
+	struct regpair protocol_data;
+};
+
+
+/*
+ * Protocol-common statistics counter
+ */
+struct stats_counter {
+	__le16 xstats_counter;
+	__le16 reserved0;
+	__le32 reserved1;
+	__le16 tstats_counter;
+	__le16 reserved2;
+	__le32 reserved3;
+	__le16 ustats_counter;
+	__le16 reserved4;
+	__le32 reserved5;
+	__le16 cstats_counter;
+	__le16 reserved6;
+	__le32 reserved7;
+};
+
+
+/*
+ * A single statistics query entry
+ */
+struct stats_query_entry {
+	u8 kind;
+	u8 index;
+	__le16 funcID;
+	__le32 reserved;
+	struct regpair address;
+};
+
+/*
+ * statistics command
+ */
+struct stats_query_cmd_group {
+	struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+};
+
+
+/*
+ * statistics command header
+ */
+struct stats_query_header {
+	u8 cmd_num;
+	u8 reserved0;
+	__le16 drv_stats_counter;
+	__le32 reserved1;
+	struct regpair stats_counters_addrs;
+};
+
+
+/*
+ * Types of statistics query entry
+ */
+enum stats_query_type {
+	STATS_TYPE_QUEUE,
+	STATS_TYPE_PORT,
+	STATS_TYPE_PF,
+	STATS_TYPE_TOE,
+	STATS_TYPE_FCOE,
+	MAX_STATS_QUERY_TYPE
+};
+
+
+/*
+ * Indicates the state of the function's status block
+ */
+enum status_block_state {
+	SB_DISABLED,
+	SB_ENABLED,
+	SB_CLEANED,
+	MAX_STATUS_BLOCK_STATE
+};
+
+
+/*
+ * Storm IDs (including attentions for IGU related enums)
+ */
+enum storm_id {
+	USTORM_ID,
+	CSTORM_ID,
+	XSTORM_ID,
+	TSTORM_ID,
+	ATTENTION_ID,
+	MAX_STORM_ID
+};
+
+
+/*
+ * Traffic types used in ETS and flow control algorithms
+ */
+enum traffic_type {
+	LLFC_TRAFFIC_TYPE_NW,
+	LLFC_TRAFFIC_TYPE_FCOE,
+	LLFC_TRAFFIC_TYPE_ISCSI,
+	MAX_TRAFFIC_TYPE
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct tstorm_queue_zone_data {
+	struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct tstorm_vf_zone_data {
+	struct regpair reserved;
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct ustorm_queue_zone_data {
+	struct ustorm_eth_rx_producers eth_rx_producers;
+	struct regpair reserved[3];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct ustorm_vf_zone_data {
+	struct regpair reserved;
+};
+
+
+/*
+ * data per VF-PF channel
+ */
+struct vf_pf_channel_data {
+#if defined(__BIG_ENDIAN)
+	u16 reserved0;
+	u8 valid;
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 valid;
+	u16 reserved0;
+#endif
+	u32 reserved1;
+};
+
+
+/*
+ * State of VF-PF channel
+ */
+enum vf_pf_channel_state {
+	VF_PF_CHANNEL_STATE_READY,
+	VF_PF_CHANNEL_STATE_WAITING_FOR_ACK,
+	MAX_VF_PF_CHANNEL_STATE
+};
+
+
+/*
+ * zone A per-queue data
+ */
+struct xstorm_queue_zone_data {
+	struct regpair reserved[4];
+};
+
+
+/*
+ * zone B per-VF data
+ */
+struct xstorm_vf_zone_data {
+	struct regpair reserved;
+};
+
+#endif /* BNX2X_HSI_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
new file mode 100644
index 0000000..4d748e7
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -0,0 +1,567 @@
+/* bnx2x_init.h: Broadcom Everest network driver.
+ *               Structures and macros needed during the initialization.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ */
+
+#ifndef BNX2X_INIT_H
+#define BNX2X_INIT_H
+
+/* Init operation types and structures */
+enum {
+	OP_RD = 0x1,	/* read a single register */
+	OP_WR,		/* write a single register */
+	OP_SW,		/* copy a string to the device */
+	OP_ZR,		/* clear memory */
+	OP_ZP,		/* unzip then copy with DMAE */
+	OP_WR_64,	/* write 64 bit pattern */
+	OP_WB,		/* copy a string using DMAE */
+	OP_WB_ZR,	/* clear a string using DMAE or indirect write */
+	/* Skip the following ops if all of the init modes don't match */
+	OP_IF_MODE_OR,
+	/* Skip the following ops if any of the init modes don't match */
+	OP_IF_MODE_AND,
+	OP_MAX
+};
+
+enum {
+	STAGE_START,
+	STAGE_END,
+};
+
+/* Returns the index of start or end of a specific block stage in ops array */
+#define BLOCK_OPS_IDX(block, stage, end) \
+	(2*(((block)*NUM_OF_INIT_PHASES) + (stage)) + (end))
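+/* Worked example (values illustrative): the end index of block 5 in the
+ * PHASE_PORT0 stage is BLOCK_OPS_IDX(5, PHASE_PORT0, STAGE_END) =
+ * 2 * ((5 * NUM_OF_INIT_PHASES) + 1) + 1.
+ */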
+
+
+/* structs for the various opcodes */
+struct raw_op {
+	u32 op:8;
+	u32 offset:24;
+	u32 raw_data;
+};
+
+struct op_read {
+	u32 op:8;
+	u32 offset:24;
+	u32 val;
+};
+
+struct op_write {
+	u32 op:8;
+	u32 offset:24;
+	u32 val;
+};
+
+struct op_arr_write {
+	u32 op:8;
+	u32 offset:24;
+#ifdef __BIG_ENDIAN
+	u16 data_len;
+	u16 data_off;
+#else /* __LITTLE_ENDIAN */
+	u16 data_off;
+	u16 data_len;
+#endif
+};
+
+struct op_zero {
+	u32 op:8;
+	u32 offset:24;
+	u32 len;
+};
+
+struct op_if_mode {
+	u32 op:8;
+	u32 cmd_offset:24;
+	u32 mode_bit_map;
+};
+
+
+union init_op {
+	struct op_read		read;
+	struct op_write		write;
+	struct op_arr_write	arr_wr;
+	struct op_zero		zero;
+	struct raw_op		raw;
+	struct op_if_mode	if_mode;
+};
+
+
+/* Init Phases */
+enum {
+	PHASE_COMMON,
+	PHASE_PORT0,
+	PHASE_PORT1,
+	PHASE_PF0,
+	PHASE_PF1,
+	PHASE_PF2,
+	PHASE_PF3,
+	PHASE_PF4,
+	PHASE_PF5,
+	PHASE_PF6,
+	PHASE_PF7,
+	NUM_OF_INIT_PHASES
+};
+
+/* Init Modes */
+enum {
+	MODE_ASIC                      = 0x00000001,
+	MODE_FPGA                      = 0x00000002,
+	MODE_EMUL                      = 0x00000004,
+	MODE_E2                        = 0x00000008,
+	MODE_E3                        = 0x00000010,
+	MODE_PORT2                     = 0x00000020,
+	MODE_PORT4                     = 0x00000040,
+	MODE_SF                        = 0x00000080,
+	MODE_MF                        = 0x00000100,
+	MODE_MF_SD                     = 0x00000200,
+	MODE_MF_SI                     = 0x00000400,
+	MODE_MF_NIV                    = 0x00000800,
+	MODE_E3_A0                     = 0x00001000,
+	MODE_E3_B0                     = 0x00002000,
+	MODE_COS3                      = 0x00004000,
+	MODE_COS6                      = 0x00008000,
+	MODE_LITTLE_ENDIAN             = 0x00010000,
+	MODE_BIG_ENDIAN                = 0x00020000,
+};
+
+/* Init Blocks */
+enum {
+	BLOCK_ATC,
+	BLOCK_BRB1,
+	BLOCK_CCM,
+	BLOCK_CDU,
+	BLOCK_CFC,
+	BLOCK_CSDM,
+	BLOCK_CSEM,
+	BLOCK_DBG,
+	BLOCK_DMAE,
+	BLOCK_DORQ,
+	BLOCK_HC,
+	BLOCK_IGU,
+	BLOCK_MISC,
+	BLOCK_NIG,
+	BLOCK_PBF,
+	BLOCK_PGLUE_B,
+	BLOCK_PRS,
+	BLOCK_PXP2,
+	BLOCK_PXP,
+	BLOCK_QM,
+	BLOCK_SRC,
+	BLOCK_TCM,
+	BLOCK_TM,
+	BLOCK_TSDM,
+	BLOCK_TSEM,
+	BLOCK_UCM,
+	BLOCK_UPB,
+	BLOCK_USDM,
+	BLOCK_USEM,
+	BLOCK_XCM,
+	BLOCK_XPB,
+	BLOCK_XSDM,
+	BLOCK_XSEM,
+	BLOCK_MISC_AEU,
+	NUM_OF_INIT_BLOCKS
+};
+
+/* QM queue numbers */
+#define BNX2X_ETH_Q		0
+#define BNX2X_TOE_Q		3
+#define BNX2X_TOE_ACK_Q		6
+#define BNX2X_ISCSI_Q		9
+#define BNX2X_ISCSI_ACK_Q	11
+#define BNX2X_FCOE_Q		10
+
+/* Vnics per mode */
+#define BNX2X_PORT2_MODE_NUM_VNICS 4
+#define BNX2X_PORT4_MODE_NUM_VNICS 2
+
+/* COS offset for port1 in E3 B0 4port mode */
+#define BNX2X_E3B0_PORT1_COS_OFFSET 3
+
+/* QM Register addresses */
+#define BNX2X_Q_VOQ_REG_ADDR(pf_q_num)\
+	(QM_REG_QVOQIDX_0 + 4 * (pf_q_num))
+#define BNX2X_VOQ_Q_REG_ADDR(cos, pf_q_num)\
+	(QM_REG_VOQQMASK_0_LSB + 4 * ((cos) * 2 + ((pf_q_num) >> 5)))
+#define BNX2X_Q_CMDQ_REG_ADDR(pf_q_num)\
+	(QM_REG_BYTECRDCMDQ_0 + 4 * ((pf_q_num) >> 4))
+
+/* computes the QM queue number for the specified port and vnic */
+#define BNX2X_PF_Q_NUM(q_num, port, vnic)\
+	((((port) << 1) | (vnic)) * 16 + (q_num))
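+/* e.g. (illustrative): the TOE queue of vnic 1 on port 0 maps to
+ * BNX2X_PF_Q_NUM(BNX2X_TOE_Q, 0, 1) = ((0 << 1) | 1) * 16 + 3 = 19.
+ */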
+
+
+/* Maps the specified queue to the specified COS */
+static inline void bnx2x_map_q_cos(struct bnx2x *bp, u32 q_num, u32 new_cos)
+{
+	/* find current COS mapping */
+	u32 curr_cos = REG_RD(bp, QM_REG_QVOQIDX_0 + q_num * 4);
+
+	/* check if queue->COS mapping has changed */
+	if (curr_cos != new_cos) {
+		u32 num_vnics = BNX2X_PORT2_MODE_NUM_VNICS;
+		u32 reg_addr, reg_bit_map, vnic;
+
+		/* update parameters for 4port mode */
+		if (INIT_MODE_FLAGS(bp) & MODE_PORT4) {
+			num_vnics = BNX2X_PORT4_MODE_NUM_VNICS;
+			if (BP_PORT(bp)) {
+				curr_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+				new_cos += BNX2X_E3B0_PORT1_COS_OFFSET;
+			}
+		}
+
+		/* change queue mapping for each VNIC */
+		for (vnic = 0; vnic < num_vnics; vnic++) {
+			u32 pf_q_num =
+				BNX2X_PF_Q_NUM(q_num, BP_PORT(bp), vnic);
+			u32 q_bit_map = 1 << (pf_q_num & 0x1f);
+
+			/* overwrite queue->VOQ mapping */
+			REG_WR(bp, BNX2X_Q_VOQ_REG_ADDR(pf_q_num), new_cos);
+
+			/* clear queue bit from current COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(curr_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map & (~q_bit_map));
+
+			/* set queue bit in new COS bit map */
+			reg_addr = BNX2X_VOQ_Q_REG_ADDR(new_cos, pf_q_num);
+			reg_bit_map = REG_RD(bp, reg_addr);
+			REG_WR(bp, reg_addr, reg_bit_map | q_bit_map);
+
+			/* set/clear queue bit in command-queue bit map
+			 * (E2/E3A0 only, valid COS values are 0/1)
+			 */
+			if (!(INIT_MODE_FLAGS(bp) & MODE_E3_B0)) {
+				reg_addr = BNX2X_Q_CMDQ_REG_ADDR(pf_q_num);
+				reg_bit_map = REG_RD(bp, reg_addr);
+				q_bit_map = 1 << (2 * (pf_q_num & 0xf));
+				reg_bit_map = new_cos ?
+					      (reg_bit_map | q_bit_map) :
+					      (reg_bit_map & (~q_bit_map));
+				REG_WR(bp, reg_addr, reg_bit_map);
+			}
+		}
+	}
+}
+
+/* Configures the QM according to the specified per-traffic-type COSes */
+static inline void bnx2x_dcb_config_qm(struct bnx2x *bp, enum cos_mode mode,
+				       struct priority_cos *traffic_cos)
+{
+	bnx2x_map_q_cos(bp, BNX2X_FCOE_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_FCOE].cos);
+	bnx2x_map_q_cos(bp, BNX2X_ISCSI_Q,
+			traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	bnx2x_map_q_cos(bp, BNX2X_ISCSI_ACK_Q,
+		traffic_cos[LLFC_TRAFFIC_TYPE_ISCSI].cos);
+	if (mode != STATIC_COS) {
+		/* required only in backward compatible COS mode */
+		bnx2x_map_q_cos(bp, BNX2X_ETH_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+		bnx2x_map_q_cos(bp, BNX2X_TOE_ACK_Q,
+				traffic_cos[LLFC_TRAFFIC_TYPE_NW].cos);
+	}
+}
+
+
+#define INITOP_SET		0	/* set the HW directly */
+#define INITOP_CLEAR		1	/* clear the HW directly */
+#define INITOP_INIT		2	/* set the init-value array */
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+struct ilt_line {
+	dma_addr_t page_mapping;
+	void *page;
+	u32 size;
+};
+
+struct ilt_client_info {
+	u32 page_size;
+	u16 start;
+	u16 end;
+	u16 client_num;
+	u16 flags;
+#define ILT_CLIENT_SKIP_INIT	0x1
+#define ILT_CLIENT_SKIP_MEM	0x2
+};
+
+struct bnx2x_ilt {
+	u32 start_line;
+	struct ilt_line		*lines;
+	struct ilt_client_info	clients[4];
+#define ILT_CLIENT_CDU	0
+#define ILT_CLIENT_QM	1
+#define ILT_CLIENT_SRC	2
+#define ILT_CLIENT_TM	3
+};
+
+/****************************************************************************
+* SRC configuration
+****************************************************************************/
+struct src_ent {
+	u8 opaque[56];
+	u64 next;
+};
+
+/****************************************************************************
+* Parity configuration
+****************************************************************************/
+#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2, m3) \
+{ \
+	block##_REG_##block##_PRTY_MASK, \
+	block##_REG_##block##_PRTY_STS_CLR, \
+	en_mask, {m1, m1h, m2, m3}, #block \
+}
+
+#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2, m3) \
+{ \
+	block##_REG_##block##_PRTY_MASK_0, \
+	block##_REG_##block##_PRTY_STS_CLR_0, \
+	en_mask, {m1, m1h, m2, m3}, #block"_0" \
+}
+
+#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2, m3) \
+{ \
+	block##_REG_##block##_PRTY_MASK_1, \
+	block##_REG_##block##_PRTY_STS_CLR_1, \
+	en_mask, {m1, m1h, m2, m3}, #block"_1" \
+}
+
+static const struct {
+	u32 mask_addr;
+	u32 sts_clr_addr;
+	u32 en_mask;		/* Mask to enable parity attentions */
+	struct {
+		u32 e1;		/* 57710 */
+		u32 e1h;	/* 57711 */
+		u32 e2;		/* 57712 */
+		u32 e3;		/* 578xx */
+	} reg_mask;		/* Register mask (all valid bits) */
+	char name[7];		/* Block's longest name is 6 characters long
+				 * (name + suffix)
+				 */
+} bnx2x_blocks_parity_data[] = {
+	/* bit 19 masked */
+	/* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
+	/* bit 5,18,20-31 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
+	/* bit 5 */
+	/* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20);	*/
+	/* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
+	/* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
+
+	/* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
+	 * want to handle "system kill" flow at the moment.
+	 */
+	BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff,
+			0x7ffffff),
+	BLOCK_PRTY_INFO_0(PXP2,	0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(PXP2,	0x1ffffff, 0x7f, 0x7f, 0x7ff, 0x1ffffff),
+	BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0, 0),
+	BLOCK_PRTY_INFO(NIG, 0xffffffff, 0x3fffffff, 0xffffffff, 0, 0),
+	BLOCK_PRTY_INFO_0(NIG,	0xffffffff, 0, 0, 0xffffffff, 0xffffffff),
+	BLOCK_PRTY_INFO_1(NIG,	0xffff, 0, 0, 0xff, 0xffff),
+	BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff, 0xfff),
+	BLOCK_PRTY_INFO(ATC, 0x1f, 0, 0, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(PGLUE_B, 0x3, 0, 0, 0x3, 0x3),
+	BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3, 0x3),
+	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0xf,
+		{0xf, 0xf, 0xf, 0xf}, "UPB"},
+	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
+		GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
+		{0xf, 0xf, 0xf, 0xf}, "XPB"},
+	BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7, 0x7),
+	BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf, 0x3f),
+	BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1, 0x1),
+	BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf, 0xf),
+	BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff, 0xff),
+	BLOCK_PRTY_INFO(PBF, 0, 0, 0x3ffff, 0xfffff, 0xfffffff),
+	BLOCK_PRTY_INFO(TM, 0, 0, 0x7f, 0x7f, 0x7f),
+	BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff, 0x7ff),
+	BLOCK_PRTY_INFO(TCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(CCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(UCM, 0, 0, 0x7ffffff, 0x7ffffff, 0x7ffffff),
+	BLOCK_PRTY_INFO(XCM, 0, 0, 0x3fffffff, 0x3fffffff, 0x3fffffff),
+	BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+	BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f, 0x1f),
+	BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff,
+			  0xffffffff),
+	BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f, 0x3f),
+};
+
+
+/* [28] MCP Latched rom_parity
+ * [29] MCP Latched ump_rx_parity
+ * [30] MCP Latched ump_tx_parity
+ * [31] MCP Latched scpad_parity
+ */
+#define MISC_AEU_ENABLE_MCP_PRTY_BITS	\
+	(AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
+	 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
+
+/* Below registers control the MCP parity attention output. When
+ * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
+ * enabled, when cleared - disabled.
+ */
+static const u32 mcp_attn_ctl_regs[] = {
+	MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_0,
+	MISC_REG_AEU_ENABLE4_PXP_0,
+	MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
+	MISC_REG_AEU_ENABLE4_NIG_1,
+	MISC_REG_AEU_ENABLE4_PXP_1
+};
+
+static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
+{
+	int i;
+	u32 reg_val;
+
+	for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
+		reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
+
+		if (enable)
+			reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
+		else
+			reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
+
+		REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
+	}
+}
+
+static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
+{
+	if (CHIP_IS_E1(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1;
+	else if (CHIP_IS_E1H(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
+	else if (CHIP_IS_E2(bp))
+		return bnx2x_blocks_parity_data[idx].reg_mask.e2;
+	else /* CHIP_IS_E3 */
+		return bnx2x_blocks_parity_data[idx].reg_mask.e3;
+}
+
+static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (dis_mask) {
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+			       dis_mask);
+			DP(NETIF_MSG_HW, "Setting parity mask "
+						 "for %s to\t\t0x%x\n",
+				    bnx2x_blocks_parity_data[i].name, dis_mask);
+		}
+	}
+
+	/* Disable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, false);
+}
+
+/* Clear the parity error status registers */
+static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+	u32 reg_val, mcp_aeu_bits =
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
+		AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
+
+	/* Clear SEM_FAST parities */
+	REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+	REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask) {
+			reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
+					 sts_clr_addr);
+			if (reg_val & reg_mask)
+				DP(NETIF_MSG_HW,
+					    "Parity errors in %s: 0x%x\n",
+					    bnx2x_blocks_parity_data[i].name,
+					    reg_val & reg_mask);
+		}
+	}
+
+	/* Check if there were parity attentions in MCP */
+	reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
+	if (reg_val & mcp_aeu_bits)
+		DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
+		   reg_val & mcp_aeu_bits);
+
+	/* Clear parity attentions in MCP:
+	 * [7]  clears Latched rom_parity
+	 * [8]  clears Latched ump_rx_parity
+	 * [9]  clears Latched ump_tx_parity
+	 * [10] clears Latched scpad_parity (both ports)
+	 */
+	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
+}
+
+static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
+		u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
+
+		if (reg_mask)
+			REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
+				bnx2x_blocks_parity_data[i].en_mask & reg_mask);
+	}
+
+	/* Enable MCP parity attentions */
+	bnx2x_set_mcp_parity(bp, true);
+}
+
+
+#endif /* BNX2X_INIT_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
new file mode 100644
index 0000000..7ec1724
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -0,0 +1,912 @@
+/* bnx2x_init_ops.h: Broadcom Everest network driver.
+ *               Static functions needed during the initialization.
+ *               This file is "included" in bnx2x_main.c.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ */
+
+#ifndef BNX2X_INIT_OPS_H
+#define BNX2X_INIT_OPS_H
+
+
+#ifndef BP_ILT
+#define BP_ILT(bp)	NULL
+#endif
+
+#ifndef BP_FUNC
+#define BP_FUNC(bp)	0
+#endif
+
+#ifndef BP_PORT
+#define BP_PORT(bp)	0
+#endif
+
+#ifndef BNX2X_ILT_FREE
+#define BNX2X_ILT_FREE(x, y, sz)
+#endif
+
+#ifndef BNX2X_ILT_ZALLOC
+#define BNX2X_ILT_ZALLOC(x, y, sz)
+#endif
+
+#ifndef ILOG2
+#define ILOG2(x)	x
+#endif
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len);
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val);
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp,
+				      dma_addr_t phys_addr, u32 addr,
+				      u32 len);
+
+static void bnx2x_init_str_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
+{
+	u32 i;
+
+	for (i = 0; i < len; i++)
+		REG_WR(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_init_ind_wr(struct bnx2x *bp, u32 addr,
+			      const u32 *data, u32 len)
+{
+	u32 i;
+
+	for (i = 0; i < len; i++)
+		bnx2x_reg_wr_ind(bp, addr + i*4, data[i]);
+}
+
+static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
+				u8 wb)
+{
+	if (bp->dmae_ready)
+		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+	else if (wb)
+		/*
+		 * Wide bus registers with no dmae need to be written
+		 * using indirect write.
+		 */
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+	else
+		bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_fill(struct bnx2x *bp, u32 addr, int fill,
+			    u32 len, u8 wb)
+{
+	u32 buf_len = (((len*4) > FW_BUF_SIZE) ? FW_BUF_SIZE : (len*4));
+	u32 buf_len32 = buf_len/4;
+	u32 i;
+
+	memset(GUNZIP_BUF(bp), (u8)fill, buf_len);
+
+	for (i = 0; i < len; i += buf_len32) {
+		u32 cur_len = min(buf_len32, len - i);
+
+		bnx2x_write_big_buf(bp, addr + i*4, cur_len, wb);
+	}
+}
+
+static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
+{
+	if (bp->dmae_ready)
+		bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
+	else
+		bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+}
+
+static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len64)
+{
+	u32 buf_len32 = FW_BUF_SIZE/4;
+	u32 len = len64*2;
+	u64 data64 = 0;
+	u32 i;
+
+	/* 64 bit value is in a blob: first low DWORD, then high DWORD */
+	data64 = HILO_U64((*(data + 1)), (*data));
+
+	len64 = min((u32)(FW_BUF_SIZE/8), len64);
+	for (i = 0; i < len64; i++) {
+		u64 *pdata = ((u64 *)(GUNZIP_BUF(bp))) + i;
+
+		*pdata = data64;
+	}
+
+	for (i = 0; i < len; i += buf_len32) {
+		u32 cur_len = min(buf_len32, len - i);
+
+		bnx2x_write_big_buf_wb(bp, addr + i*4, cur_len);
+	}
+}
+
+/*********************************************************
+   There are different blobs for each PRAM section.
+   In addition, each blob write operation is divided into a few operations
+   in order to decrease the amount of physically contiguous buffer needed.
+   Thus, when we select a blob, the address may be at some offset
+   from the beginning of the PRAM section.
+   The same holds for the INT_TABLE sections.
+**********************************************************/
+#define IF_IS_INT_TABLE_ADDR(base, addr) \
+			if (((base) <= (addr)) && ((base) + 0x400 >= (addr)))
+
+#define IF_IS_PRAM_ADDR(base, addr) \
+			if (((base) <= (addr)) && ((base) + 0x40000 >= (addr)))
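+/* Note that both macros expand to a bare 'if' statement, which is why
+ * bnx2x_sel_blob() below can chain them with 'else' like ordinary
+ * conditionals.
+ */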
+
+static const u8 *bnx2x_sel_blob(struct bnx2x *bp, u32 addr,
+				const u8 *data)
+{
+	IF_IS_INT_TABLE_ADDR(TSEM_REG_INT_TABLE, addr)
+		data = INIT_TSEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_INT_TABLE_ADDR(CSEM_REG_INT_TABLE, addr)
+			data = INIT_CSEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_INT_TABLE_ADDR(USEM_REG_INT_TABLE, addr)
+			data = INIT_USEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_INT_TABLE_ADDR(XSEM_REG_INT_TABLE, addr)
+			data = INIT_XSEM_INT_TABLE_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(TSEM_REG_PRAM, addr)
+			data = INIT_TSEM_PRAM_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(CSEM_REG_PRAM, addr)
+			data = INIT_CSEM_PRAM_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(USEM_REG_PRAM, addr)
+			data = INIT_USEM_PRAM_DATA(bp);
+	else
+		IF_IS_PRAM_ADDR(XSEM_REG_PRAM, addr)
+			data = INIT_XSEM_PRAM_DATA(bp);
+
+	return data;
+}
+
+static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
+			     const u32 *data, u32 len)
+{
+	if (bp->dmae_ready)
+		VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
+	else
+		bnx2x_init_ind_wr(bp, addr, data, len);
+}
+
+static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
+			u32 val_hi)
+{
+	u32 wb_write[2];
+
+	wb_write[0] = val_lo;
+	wb_write[1] = val_hi;
+	REG_WR_DMAE_LEN(bp, reg, wb_write, 2);
+}
+
+static void bnx2x_init_wr_zp(struct bnx2x *bp, u32 addr, u32 len,
+			     u32 blob_off)
+{
+	const u8 *data = NULL;
+	int rc;
+	u32 i;
+
+	data = bnx2x_sel_blob(bp, addr, data) + blob_off*4;
+
+	rc = bnx2x_gunzip(bp, data, len);
+	if (rc)
+		return;
+
+	/* gunzip_outlen is in dwords */
+	len = GUNZIP_OUTLEN(bp);
+	for (i = 0; i < len; i++)
+		((u32 *)GUNZIP_BUF(bp))[i] =
+				cpu_to_le32(((u32 *)GUNZIP_BUF(bp))[i]);
+
+	bnx2x_write_big_buf_wb(bp, addr, len);
+}
+
+static void bnx2x_init_block(struct bnx2x *bp, u32 block, u32 stage)
+{
+	u16 op_start =
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_START)];
+	u16 op_end =
+		INIT_OPS_OFFSETS(bp)[BLOCK_OPS_IDX(block, stage,
+						     STAGE_END)];
+	union init_op *op;
+	u32 op_idx, op_type, addr, len;
+	const u32 *data, *data_base;
+
+	/* If empty block */
+	if (op_start == op_end)
+		return;
+
+	data_base = INIT_DATA(bp);
+
+	for (op_idx = op_start; op_idx < op_end; op_idx++) {
+
+		op = (union init_op *)&(INIT_OPS(bp)[op_idx]);
+		/* Get generic data */
+		op_type = op->raw.op;
+		addr = op->raw.offset;
+		/* Get data that's used for OP_SW, OP_WB, OP_ZP and
+		 * OP_WR_64 (we assume that op_arr_write and op_write have
+		 * the same structure).
+		 */
+		len = op->arr_wr.data_len;
+		data = data_base + op->arr_wr.data_off;
+
+		switch (op_type) {
+		case OP_RD:
+			REG_RD(bp, addr);
+			break;
+		case OP_WR:
+			REG_WR(bp, addr, op->write.val);
+			break;
+		case OP_SW:
+			bnx2x_init_str_wr(bp, addr, data, len);
+			break;
+		case OP_WB:
+			bnx2x_init_wr_wb(bp, addr, data, len);
+			break;
+		case OP_ZR:
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 0);
+			break;
+		case OP_WB_ZR:
+			bnx2x_init_fill(bp, addr, 0, op->zero.len, 1);
+			break;
+		case OP_ZP:
+			bnx2x_init_wr_zp(bp, addr, len,
+					 op->arr_wr.data_off);
+			break;
+		case OP_WR_64:
+			bnx2x_init_wr_64(bp, addr, data, len);
+			break;
+		case OP_IF_MODE_AND:
+			/* if any of the flags doesn't match, skip the
+			 * conditional block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) !=
+				op->if_mode.mode_bit_map)
+				op_idx += op->if_mode.cmd_offset;
+			break;
+		case OP_IF_MODE_OR:
+			/* if all the flags don't match, skip the conditional
+			 * block.
+			 */
+			if ((INIT_MODE_FLAGS(bp) &
+				op->if_mode.mode_bit_map) == 0)
+				op_idx += op->if_mode.cmd_offset;
+			break;
+		default:
+			/* Should never get here! */
+
+			break;
+		}
+	}
+}
+
+
+/****************************************************************************
+* PXP Arbiter
+****************************************************************************/
+/*
+ * This code configures the PCI read/write arbiter
+ * which implements a weighted round robin
+ * between the virtual queues in the chip.
+ *
+ * The values were derived for each PCI max payload and max request size.
+ * Since max payload and max request size are only known at run time,
+ * this is done as a separate init stage.
+ */
+
+#define NUM_WR_Q			13
+#define NUM_RD_Q			29
+#define MAX_RD_ORD			3
+#define MAX_WR_ORD			2
+
+/* configuration for one arbiter queue */
+struct arb_line {
+	int l;
+	int add;
+	int ubound;
+};
+
+/* derived configuration for each read queue for each max request size */
+static const struct arb_line read_arb_data[NUM_RD_Q][MAX_RD_ORD + 1] = {
+/* 1 */	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+	{ {4, 8,  4},  {4,  8,  4},  {4,  8,  4},  {4,  8,  4}  },
+	{ {4, 3,  3},  {4,  3,  3},  {4,  3,  3},  {4,  3,  3}  },
+	{ {8, 3,  6},  {16, 3,  11}, {16, 3,  11}, {16, 3,  11} },
+	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25}, {64, 64, 41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {64, 3,  41} },
+/* 10 */{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 64, 6},  {16, 64, 11}, {32, 64, 21}, {32, 64, 21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+/* 20 */{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 3,  6},  {16, 3,  11}, {32, 3,  21}, {32, 3,  21} },
+	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81}, {64, 64, 120} }
+};
+
+/* derived configuration for each write queue for each max request size */
+static const struct arb_line write_arb_data[NUM_WR_Q][MAX_WR_ORD + 1] = {
+/* 1 */	{ {4, 6,  3},  {4,  6,  3},  {4,  6,  3} },
+	{ {4, 2,  3},  {4,  2,  3},  {4,  2,  3} },
+	{ {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
+	{ {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
+	{ {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
+	{ {8, 2,  6},  {16, 2,  11}, {32, 2,  21} },
+	{ {8, 64, 25}, {16, 64, 25}, {32, 64, 25} },
+	{ {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
+	{ {8, 2,  6},  {16, 2,  11}, {16, 2,  11} },
+/* 10 */{ {8, 9,  6},  {16, 9,  11}, {32, 9,  21} },
+	{ {8, 47, 19}, {16, 47, 19}, {32, 47, 21} },
+	{ {8, 9,  6},  {16, 9,  11}, {16, 9,  11} },
+	{ {8, 64, 25}, {16, 64, 41}, {32, 64, 81} }
+};
+
+/* register addresses for read queues */
+static const struct arb_line read_arb_addr[NUM_RD_Q-1] = {
+/* 1 */	{PXP2_REG_RQ_BW_RD_L0, PXP2_REG_RQ_BW_RD_ADD0,
+		PXP2_REG_RQ_BW_RD_UBOUND0},
+	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+		PXP2_REG_PSWRQ_BW_UB1},
+	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+		PXP2_REG_PSWRQ_BW_UB2},
+	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+		PXP2_REG_PSWRQ_BW_UB3},
+	{PXP2_REG_RQ_BW_RD_L4, PXP2_REG_RQ_BW_RD_ADD4,
+		PXP2_REG_RQ_BW_RD_UBOUND4},
+	{PXP2_REG_RQ_BW_RD_L5, PXP2_REG_RQ_BW_RD_ADD5,
+		PXP2_REG_RQ_BW_RD_UBOUND5},
+	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+		PXP2_REG_PSWRQ_BW_UB6},
+	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+		PXP2_REG_PSWRQ_BW_UB7},
+	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+		PXP2_REG_PSWRQ_BW_UB8},
+/* 10 */{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+		PXP2_REG_PSWRQ_BW_UB9},
+	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+		PXP2_REG_PSWRQ_BW_UB10},
+	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+		PXP2_REG_PSWRQ_BW_UB11},
+	{PXP2_REG_RQ_BW_RD_L12, PXP2_REG_RQ_BW_RD_ADD12,
+		PXP2_REG_RQ_BW_RD_UBOUND12},
+	{PXP2_REG_RQ_BW_RD_L13, PXP2_REG_RQ_BW_RD_ADD13,
+		PXP2_REG_RQ_BW_RD_UBOUND13},
+	{PXP2_REG_RQ_BW_RD_L14, PXP2_REG_RQ_BW_RD_ADD14,
+		PXP2_REG_RQ_BW_RD_UBOUND14},
+	{PXP2_REG_RQ_BW_RD_L15, PXP2_REG_RQ_BW_RD_ADD15,
+		PXP2_REG_RQ_BW_RD_UBOUND15},
+	{PXP2_REG_RQ_BW_RD_L16, PXP2_REG_RQ_BW_RD_ADD16,
+		PXP2_REG_RQ_BW_RD_UBOUND16},
+	{PXP2_REG_RQ_BW_RD_L17, PXP2_REG_RQ_BW_RD_ADD17,
+		PXP2_REG_RQ_BW_RD_UBOUND17},
+	{PXP2_REG_RQ_BW_RD_L18, PXP2_REG_RQ_BW_RD_ADD18,
+		PXP2_REG_RQ_BW_RD_UBOUND18},
+/* 20 */{PXP2_REG_RQ_BW_RD_L19, PXP2_REG_RQ_BW_RD_ADD19,
+		PXP2_REG_RQ_BW_RD_UBOUND19},
+	{PXP2_REG_RQ_BW_RD_L20, PXP2_REG_RQ_BW_RD_ADD20,
+		PXP2_REG_RQ_BW_RD_UBOUND20},
+	{PXP2_REG_RQ_BW_RD_L22, PXP2_REG_RQ_BW_RD_ADD22,
+		PXP2_REG_RQ_BW_RD_UBOUND22},
+	{PXP2_REG_RQ_BW_RD_L23, PXP2_REG_RQ_BW_RD_ADD23,
+		PXP2_REG_RQ_BW_RD_UBOUND23},
+	{PXP2_REG_RQ_BW_RD_L24, PXP2_REG_RQ_BW_RD_ADD24,
+		PXP2_REG_RQ_BW_RD_UBOUND24},
+	{PXP2_REG_RQ_BW_RD_L25, PXP2_REG_RQ_BW_RD_ADD25,
+		PXP2_REG_RQ_BW_RD_UBOUND25},
+	{PXP2_REG_RQ_BW_RD_L26, PXP2_REG_RQ_BW_RD_ADD26,
+		PXP2_REG_RQ_BW_RD_UBOUND26},
+	{PXP2_REG_RQ_BW_RD_L27, PXP2_REG_RQ_BW_RD_ADD27,
+		PXP2_REG_RQ_BW_RD_UBOUND27},
+	{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+		PXP2_REG_PSWRQ_BW_UB28}
+};
+
+/* register addresses for write queues */
+static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
+/* 1 */	{PXP2_REG_PSWRQ_BW_L1, PXP2_REG_PSWRQ_BW_ADD1,
+		PXP2_REG_PSWRQ_BW_UB1},
+	{PXP2_REG_PSWRQ_BW_L2, PXP2_REG_PSWRQ_BW_ADD2,
+		PXP2_REG_PSWRQ_BW_UB2},
+	{PXP2_REG_PSWRQ_BW_L3, PXP2_REG_PSWRQ_BW_ADD3,
+		PXP2_REG_PSWRQ_BW_UB3},
+	{PXP2_REG_PSWRQ_BW_L6, PXP2_REG_PSWRQ_BW_ADD6,
+		PXP2_REG_PSWRQ_BW_UB6},
+	{PXP2_REG_PSWRQ_BW_L7, PXP2_REG_PSWRQ_BW_ADD7,
+		PXP2_REG_PSWRQ_BW_UB7},
+	{PXP2_REG_PSWRQ_BW_L8, PXP2_REG_PSWRQ_BW_ADD8,
+		PXP2_REG_PSWRQ_BW_UB8},
+	{PXP2_REG_PSWRQ_BW_L9, PXP2_REG_PSWRQ_BW_ADD9,
+		PXP2_REG_PSWRQ_BW_UB9},
+	{PXP2_REG_PSWRQ_BW_L10, PXP2_REG_PSWRQ_BW_ADD10,
+		PXP2_REG_PSWRQ_BW_UB10},
+	{PXP2_REG_PSWRQ_BW_L11, PXP2_REG_PSWRQ_BW_ADD11,
+		PXP2_REG_PSWRQ_BW_UB11},
+/* 10 */{PXP2_REG_PSWRQ_BW_L28, PXP2_REG_PSWRQ_BW_ADD28,
+		PXP2_REG_PSWRQ_BW_UB28},
+	{PXP2_REG_RQ_BW_WR_L29, PXP2_REG_RQ_BW_WR_ADD29,
+		PXP2_REG_RQ_BW_WR_UBOUND29},
+	{PXP2_REG_RQ_BW_WR_L30, PXP2_REG_RQ_BW_WR_ADD30,
+		PXP2_REG_RQ_BW_WR_UBOUND30}
+};
+
+static void bnx2x_init_pxp_arb(struct bnx2x *bp, int r_order,
+			       int w_order)
+{
+	u32 val, i;
+
+	if (r_order > MAX_RD_ORD) {
+		DP(NETIF_MSG_HW, "read order of %d  order adjusted to %d\n",
+		   r_order, MAX_RD_ORD);
+		r_order = MAX_RD_ORD;
+	}
+	if (w_order > MAX_WR_ORD) {
+		DP(NETIF_MSG_HW, "write order of %d  order adjusted to %d\n",
+		   w_order, MAX_WR_ORD);
+		w_order = MAX_WR_ORD;
+	}
+	if (CHIP_REV_IS_FPGA(bp)) {
+		DP(NETIF_MSG_HW, "write order adjusted to 1 for FPGA\n");
+		w_order = 0;
+	}
+	DP(NETIF_MSG_HW, "read order %d  write order %d\n", r_order, w_order);
+
+	for (i = 0; i < NUM_RD_Q-1; i++) {
+		REG_WR(bp, read_arb_addr[i].l, read_arb_data[i][r_order].l);
+		REG_WR(bp, read_arb_addr[i].add,
+		       read_arb_data[i][r_order].add);
+		REG_WR(bp, read_arb_addr[i].ubound,
+		       read_arb_data[i][r_order].ubound);
+	}
+
+	for (i = 0; i < NUM_WR_Q-1; i++) {
+		if ((write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L29) ||
+		    (write_arb_addr[i].l == PXP2_REG_RQ_BW_WR_L30)) {
+
+			REG_WR(bp, write_arb_addr[i].l,
+			       write_arb_data[i][w_order].l);
+
+			REG_WR(bp, write_arb_addr[i].add,
+			       write_arb_data[i][w_order].add);
+
+			REG_WR(bp, write_arb_addr[i].ubound,
+			       write_arb_data[i][w_order].ubound);
+		} else {
+
+			val = REG_RD(bp, write_arb_addr[i].l);
+			REG_WR(bp, write_arb_addr[i].l,
+			       val | (write_arb_data[i][w_order].l << 10));
+
+			val = REG_RD(bp, write_arb_addr[i].add);
+			REG_WR(bp, write_arb_addr[i].add,
+			       val | (write_arb_data[i][w_order].add << 10));
+
+			val = REG_RD(bp, write_arb_addr[i].ubound);
+			REG_WR(bp, write_arb_addr[i].ubound,
+			       val | (write_arb_data[i][w_order].ubound << 7));
+		}
+	}
+
+	val =  write_arb_data[NUM_WR_Q-1][w_order].add;
+	val += write_arb_data[NUM_WR_Q-1][w_order].ubound << 10;
+	val += write_arb_data[NUM_WR_Q-1][w_order].l << 17;
+	REG_WR(bp, PXP2_REG_PSWRQ_BW_RD, val);
+
+	val =  read_arb_data[NUM_RD_Q-1][r_order].add;
+	val += read_arb_data[NUM_RD_Q-1][r_order].ubound << 10;
+	val += read_arb_data[NUM_RD_Q-1][r_order].l << 17;
+	REG_WR(bp, PXP2_REG_PSWRQ_BW_WR, val);
+
+	REG_WR(bp, PXP2_REG_RQ_WR_MBS0, w_order);
+	REG_WR(bp, PXP2_REG_RQ_WR_MBS1, w_order);
+	REG_WR(bp, PXP2_REG_RQ_RD_MBS0, r_order);
+	REG_WR(bp, PXP2_REG_RQ_RD_MBS1, r_order);
+
+	if ((CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) && (r_order == MAX_RD_ORD))
+		REG_WR(bp, PXP2_REG_RQ_PDR_LIMIT, 0xe00);
+
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x4 << w_order));
+	else if (CHIP_IS_E2(bp))
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x8 << w_order));
+	else
+		REG_WR(bp, PXP2_REG_WR_USDMDP_TH, (0x18 << w_order));
+
+	if (!CHIP_IS_E1(bp)) {
+		/*    MPS      w_order     optimal TH      presently TH
+		 *    128         0             0               2
+		 *    256         1             1               3
+		 *    >=512       2             2               3
+		 */
+		/* DMAE is special */
+		if (!CHIP_IS_E1H(bp)) {
+			/* E2 can use optimal TH */
+			val = w_order;
+			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, val);
+		} else {
+			val = ((w_order == 0) ? 2 : 3);
+			REG_WR(bp, PXP2_REG_WR_DMAE_MPS, 2);
+		}
+
+		REG_WR(bp, PXP2_REG_WR_HC_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_USDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_CSDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_TSDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_XSDM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_QM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_TM_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_SRC_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_DBG_MPS, val);
+		REG_WR(bp, PXP2_REG_WR_CDU_MPS, val);
+	}
+
+	/* Validate the number of tags supported by the device */
+#define PCIE_REG_PCIER_TL_HDR_FC_ST		0x2980
+	val = REG_RD(bp, PCIE_REG_PCIER_TL_HDR_FC_ST);
+	val &= 0xFF;
+	if (val <= 0x20)
+		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x20);
+}
+
+/****************************************************************************
+* ILT management
+****************************************************************************/
+/*
+ * This code hides the low level HW interaction for ILT management and
+ * configuration. The API consists of a shadow ILT table which is set by the
+ * driver and a set of routines to use it to configure the HW.
+ *
+ */
+
+/* ILT HW init operations */
+
+/* ILT memory management operations */
+#define ILT_MEMOP_ALLOC		0
+#define ILT_MEMOP_FREE		1
+
+/* The physical address is shifted right 12 bits and has a valid bit (1)
+ * added in the 53rd bit; then, since this is a wide register(TM),
+ * we split it into two 32 bit writes.
+ */
+#define ILT_ADDR1(x)		((u32)(((u64)(x) >> 12) & 0xFFFFFFFF))
+#define ILT_ADDR2(x)		((u32)((1 << 20) | ((u64)(x) >> 44)))
+#define ILT_RANGE(f, l)		(((l) << 10) | (f))
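+/* A worked example of the split (hypothetical mapping value, for
+ * illustration only): for page_mapping == 0x0000001234567000,
+ * ILT_ADDR1() == 0x01234567 (bits 12..43 of the address) and
+ * ILT_ADDR2() == 0x00100000 (valid bit set; bits 44..63 are zero here).
+ */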
+
+static int bnx2x_ilt_line_mem_op(struct bnx2x *bp,
+				 struct ilt_line *line, u32 size, u8 memop)
+{
+	if (memop == ILT_MEMOP_FREE) {
+		BNX2X_ILT_FREE(line->page, line->page_mapping, line->size);
+		return 0;
+	}
+	BNX2X_ILT_ZALLOC(line->page, &line->page_mapping, size);
+	if (!line->page)
+		return -1;
+	line->size = size;
+	return 0;
+}
+
+
+static int bnx2x_ilt_client_mem_op(struct bnx2x *bp, int cli_num,
+				   u8 memop)
+{
+	int i, rc;
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+	if (!ilt || !ilt->lines)
+		return -1;
+
+	if (ilt_cli->flags & (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM))
+		return 0;
+
+	for (rc = 0, i = ilt_cli->start; i <= ilt_cli->end && !rc; i++) {
+		rc = bnx2x_ilt_line_mem_op(bp, &ilt->lines[i],
+					   ilt_cli->page_size, memop);
+	}
+	return rc;
+}
+
+static int bnx2x_ilt_mem_op(struct bnx2x *bp, u8 memop)
+{
+	int rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_CDU, memop);
+	if (!rc)
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_QM, memop);
+	if (!rc)
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_SRC, memop);
+	if (!rc)
+		rc = bnx2x_ilt_client_mem_op(bp, ILT_CLIENT_TM, memop);
+
+	return rc;
+}
+
+static void bnx2x_ilt_line_wr(struct bnx2x *bp, int abs_idx,
+			      dma_addr_t page_mapping)
+{
+	u32 reg;
+
+	if (CHIP_IS_E1(bp))
+		reg = PXP2_REG_RQ_ONCHIP_AT + abs_idx*8;
+	else
+		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + abs_idx*8;
+
+	bnx2x_wr_64(bp, reg, ILT_ADDR1(page_mapping), ILT_ADDR2(page_mapping));
+}
+
+static void bnx2x_ilt_line_init_op(struct bnx2x *bp,
+				   struct bnx2x_ilt *ilt, int idx, u8 initop)
+{
+	dma_addr_t	null_mapping;
+	int abs_idx = ilt->start_line + idx;
+
+	switch (initop) {
+	case INITOP_INIT:
+		/* set in the init-value array */
+	case INITOP_SET:
+		bnx2x_ilt_line_wr(bp, abs_idx, ilt->lines[idx].page_mapping);
+		break;
+	case INITOP_CLEAR:
+		null_mapping = 0;
+		bnx2x_ilt_line_wr(bp, abs_idx, null_mapping);
+		break;
+	}
+}
+
+static void bnx2x_ilt_boundry_init_op(struct bnx2x *bp,
+				      struct ilt_client_info *ilt_cli,
+				      u32 ilt_start, u8 initop)
+{
+	u32 start_reg = 0;
+	u32 end_reg = 0;
+
+	/* The boundary is written for either SET or INIT; CLEAR is treated
+	 * like SET, and for now SET behaves the same as INIT. */
+
+	/* find the appropriate regs */
+	if (CHIP_IS_E1(bp)) {
+		switch (ilt_cli->client_num) {
+		case ILT_CLIENT_CDU:
+			start_reg = PXP2_REG_PSWRQ_CDU0_L2P;
+			break;
+		case ILT_CLIENT_QM:
+			start_reg = PXP2_REG_PSWRQ_QM0_L2P;
+			break;
+		case ILT_CLIENT_SRC:
+			start_reg = PXP2_REG_PSWRQ_SRC0_L2P;
+			break;
+		case ILT_CLIENT_TM:
+			start_reg = PXP2_REG_PSWRQ_TM0_L2P;
+			break;
+		}
+		REG_WR(bp, start_reg + BP_FUNC(bp)*4,
+		       ILT_RANGE((ilt_start + ilt_cli->start),
+				 (ilt_start + ilt_cli->end)));
+	} else {
+		switch (ilt_cli->client_num) {
+		case ILT_CLIENT_CDU:
+			start_reg = PXP2_REG_RQ_CDU_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_CDU_LAST_ILT;
+			break;
+		case ILT_CLIENT_QM:
+			start_reg = PXP2_REG_RQ_QM_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_QM_LAST_ILT;
+			break;
+		case ILT_CLIENT_SRC:
+			start_reg = PXP2_REG_RQ_SRC_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_SRC_LAST_ILT;
+			break;
+		case ILT_CLIENT_TM:
+			start_reg = PXP2_REG_RQ_TM_FIRST_ILT;
+			end_reg = PXP2_REG_RQ_TM_LAST_ILT;
+			break;
+		}
+		REG_WR(bp, start_reg, (ilt_start + ilt_cli->start));
+		REG_WR(bp, end_reg, (ilt_start + ilt_cli->end));
+	}
+}
+
+static void bnx2x_ilt_client_init_op_ilt(struct bnx2x *bp,
+					 struct bnx2x_ilt *ilt,
+					 struct ilt_client_info *ilt_cli,
+					 u8 initop)
+{
+	int i;
+
+	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+		return;
+
+	for (i = ilt_cli->start; i <= ilt_cli->end; i++)
+		bnx2x_ilt_line_init_op(bp, ilt, i, initop);
+
+	/* init/clear the ILT boundaries */
+	bnx2x_ilt_boundry_init_op(bp, ilt_cli, ilt->start_line, initop);
+}
+
+static void bnx2x_ilt_client_init_op(struct bnx2x *bp,
+				     struct ilt_client_info *ilt_cli, u8 initop)
+{
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+
+	bnx2x_ilt_client_init_op_ilt(bp, ilt, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_client_id_init_op(struct bnx2x *bp,
+					int cli_num, u8 initop)
+{
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+	bnx2x_ilt_client_init_op(bp, ilt_cli, initop);
+}
+
+static void bnx2x_ilt_init_op(struct bnx2x *bp, u8 initop)
+{
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_CDU, initop);
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_QM, initop);
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_SRC, initop);
+	bnx2x_ilt_client_id_init_op(bp, ILT_CLIENT_TM, initop);
+}
+
+static void bnx2x_ilt_init_client_psz(struct bnx2x *bp, int cli_num,
+				      u32 psz_reg, u8 initop)
+{
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	struct ilt_client_info *ilt_cli = &ilt->clients[cli_num];
+
+	if (ilt_cli->flags & ILT_CLIENT_SKIP_INIT)
+		return;
+
+	switch (initop) {
+	case INITOP_INIT:
+		/* set in the init-value array */
+	case INITOP_SET:
+		REG_WR(bp, psz_reg, ILOG2(ilt_cli->page_size >> 12));
+		break;
+	case INITOP_CLEAR:
+		break;
+	}
+}
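+/* For illustration (hypothetical page size, and assuming ILOG2() is an
+ * integer log2): a 64KB ILT page gives ILOG2(0x10000 >> 12) == ILOG2(16) == 4
+ * as the value programmed into the P_SIZE register above.
+ */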
+
+/*
+ * Called during the init common stage; the ILT clients should be
+ * initialized prior to calling this function.
+ */
+static void bnx2x_ilt_init_page_size(struct bnx2x *bp, u8 initop)
+{
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_CDU,
+				  PXP2_REG_RQ_CDU_P_SIZE, initop);
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_QM,
+				  PXP2_REG_RQ_QM_P_SIZE, initop);
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_SRC,
+				  PXP2_REG_RQ_SRC_P_SIZE, initop);
+	bnx2x_ilt_init_client_psz(bp, ILT_CLIENT_TM,
+				  PXP2_REG_RQ_TM_P_SIZE, initop);
+}
+
+/****************************************************************************
+* QM initializations
+****************************************************************************/
+#define QM_QUEUES_PER_FUNC	16 /* E1 has 32, but only 16 are used */
+#define QM_INIT_MIN_CID_COUNT	31
+#define QM_INIT(cid_cnt)	((cid_cnt) > QM_INIT_MIN_CID_COUNT)
+
+/* called during init port stage */
+static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
+				    u8 initop)
+{
+	int port = BP_PORT(bp);
+
+	if (QM_INIT(qm_cid_count)) {
+		switch (initop) {
+		case INITOP_INIT:
+			/* set in the init-value array */
+		case INITOP_SET:
+			REG_WR(bp, QM_REG_CONNNUM_0 + port*4,
+			       qm_cid_count/16 - 1);
+			break;
+		case INITOP_CLEAR:
+			break;
+		}
+	}
+}
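+/* Example (hypothetical CID count): with qm_cid_count == 64, the write above
+ * programs QM_REG_CONNNUM_0 with 64/16 - 1 == 3.
+ */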
+
+static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
+{
+	int i;
+	u32 wb_data[2];
+
+	wb_data[0] = wb_data[1] = 0;
+
+	for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
+		REG_WR(bp, QM_REG_BASEADDR + i*4,
+		       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+		bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
+				  wb_data, 2);
+
+		if (CHIP_IS_E1H(bp)) {
+			REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
+			       qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
+			bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
+					  wb_data, 2);
+		}
+	}
+}
+
+/* called during init common stage */
+static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
+				    u8 initop)
+{
+	if (!QM_INIT(qm_cid_count))
+		return;
+
+	switch (initop) {
+	case INITOP_INIT:
+		/* set in the init-value array */
+	case INITOP_SET:
+		bnx2x_qm_set_ptr_table(bp, qm_cid_count);
+		break;
+	case INITOP_CLEAR:
+		break;
+	}
+}
+
+/****************************************************************************
+* SRC initializations
+****************************************************************************/
+#ifdef BCM_CNIC
+/* called during init func stage */
+static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
+			      dma_addr_t t2_mapping, int src_cid_count)
+{
+	int i;
+	int port = BP_PORT(bp);
+
+	/* Initialize T2 */
+	for (i = 0; i < src_cid_count-1; i++)
+		t2[i].next = (u64)(t2_mapping +
+			     (i+1)*sizeof(struct src_ent));
+
+	/* tell the searcher where the T2 table is */
+	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, src_cid_count);
+
+	bnx2x_wr_64(bp, SRC_REG_FIRSTFREE0 + port*16,
+		    U64_LO(t2_mapping), U64_HI(t2_mapping));
+
+	bnx2x_wr_64(bp, SRC_REG_LASTFREE0 + port*16,
+		    U64_LO((u64)t2_mapping +
+			   (src_cid_count-1) * sizeof(struct src_ent)),
+		    U64_HI((u64)t2_mapping +
+			   (src_cid_count-1) * sizeof(struct src_ent)));
+}
+#endif
+#endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
new file mode 100644
index 0000000..d45b155
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -0,0 +1,12472 @@
+/* Copyright 2008-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mutex.h>
+
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+
+
+/********************************************************/
+#define ETH_HLEN			14
+/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
+#define ETH_OVREHEAD			(ETH_HLEN + 8 + 8)
+#define ETH_MIN_PACKET_SIZE		60
+#define ETH_MAX_PACKET_SIZE		1500
+#define ETH_MAX_JUMBO_PACKET_SIZE	9600
+#define MDIO_ACCESS_TIMEOUT		1000
+#define BMAC_CONTROL_RX_ENABLE		2
+#define WC_LANE_MAX			4
+#define I2C_SWITCH_WIDTH		2
+#define I2C_BSC0			0
+#define I2C_BSC1			1
+#define I2C_WA_RETRY_CNT		3
+#define MCPR_IMC_COMMAND_READ_OP	1
+#define MCPR_IMC_COMMAND_WRITE_OP	2
+
+/***********************************************************/
+/*			Shortcut definitions		   */
+/***********************************************************/
+
+#define NIG_LATCH_BC_ENABLE_MI_INT 0
+
+#define NIG_STATUS_EMAC0_MI_INT \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT
+#define NIG_STATUS_XGXS0_LINK10G \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G
+#define NIG_STATUS_XGXS0_LINK_STATUS \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS
+#define NIG_STATUS_XGXS0_LINK_STATUS_SIZE \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE
+#define NIG_STATUS_SERDES0_LINK_STATUS \
+		NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS
+#define NIG_MASK_MI_INT \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT
+#define NIG_MASK_XGXS0_LINK10G \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G
+#define NIG_MASK_XGXS0_LINK_STATUS \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS
+#define NIG_MASK_SERDES0_LINK_STATUS \
+		NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS
+
+#define MDIO_AN_CL73_OR_37_COMPLETE \
+		(MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE | \
+		 MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE)
+
+#define XGXS_RESET_BITS \
+	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW |   \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ |      \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN |    \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD | \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB)
+
+#define SERDES_RESET_BITS \
+	(MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW | \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ |    \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN |  \
+	 MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD)
+
+#define AUTONEG_CL37		SHARED_HW_CFG_AN_ENABLE_CL37
+#define AUTONEG_CL73		SHARED_HW_CFG_AN_ENABLE_CL73
+#define AUTONEG_BAM		SHARED_HW_CFG_AN_ENABLE_BAM
+#define AUTONEG_PARALLEL \
+				SHARED_HW_CFG_AN_ENABLE_PARALLEL_DETECTION
+#define AUTONEG_SGMII_FIBER_AUTODET \
+				SHARED_HW_CFG_AN_EN_SGMII_FIBER_AUTO_DETECT
+#define AUTONEG_REMOTE_PHY	SHARED_HW_CFG_AN_ENABLE_REMOTE_PHY
+
+#define GP_STATUS_PAUSE_RSOLUTION_TXSIDE \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE
+#define GP_STATUS_PAUSE_RSOLUTION_RXSIDE \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE
+#define GP_STATUS_SPEED_MASK \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK
+#define GP_STATUS_10M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M
+#define GP_STATUS_100M	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M
+#define GP_STATUS_1G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G
+#define GP_STATUS_2_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G
+#define GP_STATUS_5G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G
+#define GP_STATUS_6G	MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G
+#define GP_STATUS_10G_HIG \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG
+#define GP_STATUS_10G_CX4 \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4
+#define GP_STATUS_1G_KX MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX
+#define GP_STATUS_10G_KX4 \
+			MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4
+#define	GP_STATUS_10G_KR MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR
+#define	GP_STATUS_10G_XFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI
+#define	GP_STATUS_20G_DXGXS MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS
+#define	GP_STATUS_10G_SFI   MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI
+#define LINK_10THD		LINK_STATUS_SPEED_AND_DUPLEX_10THD
+#define LINK_10TFD		LINK_STATUS_SPEED_AND_DUPLEX_10TFD
+#define LINK_100TXHD		LINK_STATUS_SPEED_AND_DUPLEX_100TXHD
+#define LINK_100T4		LINK_STATUS_SPEED_AND_DUPLEX_100T4
+#define LINK_100TXFD		LINK_STATUS_SPEED_AND_DUPLEX_100TXFD
+#define LINK_1000THD		LINK_STATUS_SPEED_AND_DUPLEX_1000THD
+#define LINK_1000TFD		LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
+#define LINK_1000XFD		LINK_STATUS_SPEED_AND_DUPLEX_1000XFD
+#define LINK_2500THD		LINK_STATUS_SPEED_AND_DUPLEX_2500THD
+#define LINK_2500TFD		LINK_STATUS_SPEED_AND_DUPLEX_2500TFD
+#define LINK_2500XFD		LINK_STATUS_SPEED_AND_DUPLEX_2500XFD
+#define LINK_10GTFD		LINK_STATUS_SPEED_AND_DUPLEX_10GTFD
+#define LINK_10GXFD		LINK_STATUS_SPEED_AND_DUPLEX_10GXFD
+#define LINK_20GTFD		LINK_STATUS_SPEED_AND_DUPLEX_20GTFD
+#define LINK_20GXFD		LINK_STATUS_SPEED_AND_DUPLEX_20GXFD
+
+
+
+/* SFP module EEPROM related definitions */
+#define SFP_EEPROM_CON_TYPE_ADDR		0x2
+	#define SFP_EEPROM_CON_TYPE_VAL_LC	0x7
+	#define SFP_EEPROM_CON_TYPE_VAL_COPPER	0x21
+
+
+#define SFP_EEPROM_COMP_CODE_ADDR		0x3
+	#define SFP_EEPROM_COMP_CODE_SR_MASK	(1<<4)
+	#define SFP_EEPROM_COMP_CODE_LR_MASK	(1<<5)
+	#define SFP_EEPROM_COMP_CODE_LRM_MASK	(1<<6)
+
+#define SFP_EEPROM_FC_TX_TECH_ADDR		0x8
+	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4
+	#define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE  0x8
+
+#define SFP_EEPROM_OPTIONS_ADDR			0x40
+	#define SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK 0x1
+#define SFP_EEPROM_OPTIONS_SIZE			2
+
+#define EDC_MODE_LINEAR				0x0022
+#define EDC_MODE_LIMITING				0x0044
+#define EDC_MODE_PASSIVE_DAC			0x0055
+
+
+/* BRB thresholds for E2*/
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE		170
+#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE		0
+
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE		250
+#define PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE		0
+
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE		90
+
+#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE			50
+#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE		250
+
+/* BRB thresholds for E3A0 */
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE		290
+#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE		0
+
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE		410
+#define PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE		0
+
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE		170
+
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE		50
+#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE		410
+
+
+/* BRB thresholds for E3B0 2 port mode*/
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE		1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE	0
+
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE		1025
+#define PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE	0
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE	1025
+
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE		50
+#define PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE	1025
+
+/* only for E3B0*/
+#define PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR			1025
+#define PFC_E3B0_2P_BRB_FULL_LB_XON_THR			1025
+
+/* Lossy + Lossless guaranteed ("GUART" in the names below) */
+#define PFC_E3B0_2P_MIX_PAUSE_LB_GUART			284
+/* Lossless + Lossless */
+#define PFC_E3B0_2P_PAUSE_LB_GUART			236
+/* Lossy + Lossy */
+#define PFC_E3B0_2P_NON_PAUSE_LB_GUART			342
+
+/* Lossy + Lossless */
+#define PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART		284
+/* Lossless + Lossless */
+#define PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART		236
+/* Lossy + Lossy */
+#define PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART		336
+#define PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST		80
+
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART		0
+#define PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST		0
+
+/* BRB thresholds for E3B0 4 port mode */
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE		304
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE	0
+
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE		384
+#define PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE	0
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE		10
+#define PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE	304
+
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE		50
+#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE	384
+
+
+/* only for E3B0*/
+#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR			304
+#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR			384
+#define PFC_E3B0_4P_LB_GUART				120
+
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART		120
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST		80
+
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART		80
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST		120
+
+#define DCBX_INVALID_COS					(0xFF)
+
+#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND		(0x5000)
+#define ETS_BW_LIMIT_CREDIT_WEIGHT		(0x5000)
+#define ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS		(1360)
+#define ETS_E3B0_NIG_MIN_W_VAL_20GBPS			(2720)
+#define ETS_E3B0_PBF_MIN_W_VAL				(10000)
+
+#define MAX_PACKET_SIZE					(9700)
+#define WC_UC_TIMEOUT					100
+
+/**********************************************************/
+/*                     INTERFACE                          */
+/**********************************************************/
+
+#define CL22_WR_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+	bnx2x_cl45_write(_bp, _phy, \
+		(_phy)->def_md_devad, \
+		(_bank + (_addr & 0xf)), \
+		_val)
+
+#define CL22_RD_OVER_CL45(_bp, _phy, _bank, _addr, _val) \
+	bnx2x_cl45_read(_bp, _phy, \
+		(_phy)->def_md_devad, \
+		(_bank + (_addr & 0xf)), \
+		_val)
+
+static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
+{
+	u32 val = REG_RD(bp, reg);
+
+	val |= bits;
+	REG_WR(bp, reg, val);
+	return val;
+}
+
+static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
+{
+	u32 val = REG_RD(bp, reg);
+
+	val &= ~bits;
+	REG_WR(bp, reg, val);
+	return val;
+}
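+/* Usage sketch (illustrative): bnx2x_bits_en(bp, reg, BMAC_CONTROL_RX_ENABLE)
+ * read-modify-writes the register, setting bit 1, and returns the new value.
+ */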
+
+/******************************************************************/
+/*			EPIO/GPIO section			  */
+/******************************************************************/
+static void bnx2x_get_epio(struct bnx2x *bp, u32 epio_pin, u32 *en)
+{
+	u32 epio_mask, gp_oenable;
+	*en = 0;
+	/* Sanity check */
+	if (epio_pin > 31) {
+		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to get\n", epio_pin);
+		return;
+	}
+
+	epio_mask = 1 << epio_pin;
+	/* Set this EPIO to output */
+	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable & ~epio_mask);
+
+	*en = (REG_RD(bp, MCP_REG_MCPR_GP_INPUTS) & epio_mask) >> epio_pin;
+}
+static void bnx2x_set_epio(struct bnx2x *bp, u32 epio_pin, u32 en)
+{
+	u32 epio_mask, gp_output, gp_oenable;
+
+	/* Sanity check */
+	if (epio_pin > 31) {
+		DP(NETIF_MSG_LINK, "Invalid EPIO pin %d to set\n", epio_pin);
+		return;
+	}
+	DP(NETIF_MSG_LINK, "Setting EPIO pin %d to %d\n", epio_pin, en);
+	epio_mask = 1 << epio_pin;
+	/* Set this EPIO to output */
+	gp_output = REG_RD(bp, MCP_REG_MCPR_GP_OUTPUTS);
+	if (en)
+		gp_output |= epio_mask;
+	else
+		gp_output &= ~epio_mask;
+
+	REG_WR(bp, MCP_REG_MCPR_GP_OUTPUTS, gp_output);
+
+	/* Set the value for this EPIO */
+	gp_oenable = REG_RD(bp, MCP_REG_MCPR_GP_OENABLE);
+	REG_WR(bp, MCP_REG_MCPR_GP_OENABLE, gp_oenable | epio_mask);
+}
+
+static void bnx2x_set_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 val)
+{
+	if (pin_cfg == PIN_CFG_NA)
+		return;
+	if (pin_cfg >= PIN_CFG_EPIO0) {
+		bnx2x_set_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+	} else {
+		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+		bnx2x_set_gpio(bp, gpio_num, (u8)val, gpio_port);
+	}
+}
+
+static u32 bnx2x_get_cfg_pin(struct bnx2x *bp, u32 pin_cfg, u32 *val)
+{
+	if (pin_cfg == PIN_CFG_NA)
+		return -EINVAL;
+	if (pin_cfg >= PIN_CFG_EPIO0) {
+		bnx2x_get_epio(bp, pin_cfg - PIN_CFG_EPIO0, val);
+	} else {
+		u8 gpio_num = (pin_cfg - PIN_CFG_GPIO0_P0) & 0x3;
+		u8 gpio_port = (pin_cfg - PIN_CFG_GPIO0_P0) >> 2;
+		*val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+	}
+	return 0;
+}
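+/* Pin config encoding, for illustration (hypothetical value): a pin_cfg of
+ * PIN_CFG_GPIO0_P0 + 5 decodes above to gpio_num == 1, gpio_port == 1,
+ * i.e. GPIO1 on port 1; values >= PIN_CFG_EPIO0 select an EPIO pin instead.
+ */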
+/******************************************************************/
+/*				ETS section			  */
+/******************************************************************/
+static void bnx2x_ets_e2e3a0_disabled(struct link_params *params)
+{
+	/* ETS disabled configuration*/
+	struct bnx2x *bp = params->bp;
+
+	DP(NETIF_MSG_LINK, "ETS E2E3 disabled configuration\n");
+
+	/*
+	 * Mapping between entry priority and client number (0,1,2 - debug
+	 * and management clients, 3 - COS0 client, 4 - COS1 client
+	 * (HIGHEST)), 3 bits per client:
+	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 * cos1-100     cos0-011     dbg1-010     dbg0-001     MCP-000
+	 */
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
+	/*
+	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
+	 * COS0 entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 * bit4   bit3	  bit2   bit1	  bit0
+	 * MCP and debug are strict
+	 */
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+	/* defines which entries (clients) are subjected to WFQ arbitration */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+	/*
+	 * For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/*
+	 * mapping between the CREDIT_WEIGHT registers and actual client
+	 * numbers
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
+	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
+	/* ETS mode disable */
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+	/*
+	 * If ETS mode is enabled (there is no strict priority) defines a WFQ
+	 * weight for COS0/COS1.
+	 */
+	REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
+	REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
+	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
+	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
+	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
+	/* Defines the number of consecutive slots for the strict priority */
+	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+}
+/******************************************************************************
+* Description:
+*	Returns min_w_val, which is set according to the line speed.
+*.
+******************************************************************************/
+static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
+{
+	u32 min_w_val = 0;
+	/* Calculate min_w_val.*/
+	if (vars->link_up) {
+		if (SPEED_20000 == vars->line_speed)
+			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+		else
+			min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
+	} else
+		min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
+	/**
+	 * If the link isn't up (e.g. a static configuration), the link
+	 * is treated as 20Gbps.
+	 */
+	return min_w_val;
+}
+/******************************************************************************
+* Description:
+*	Returns the credit upper bound derived from min_w_val.
+*.
+******************************************************************************/
+static u32 bnx2x_ets_get_credit_upper_bound(const u32 min_w_val)
+{
+	const u32 credit_upper_bound = (u32)MAXVAL((150 * min_w_val),
+						MAX_PACKET_SIZE);
+	return credit_upper_bound;
+}
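+/* For illustration, assuming MAXVAL() returns the larger of its arguments:
+ * with min_w_val == ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360),
+ * 150 * 1360 == 204000 > MAX_PACKET_SIZE (9700), so the bound is 204000.
+ */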
+/******************************************************************************
+* Description:
+*	Set credit upper bound for NIG.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
+	const struct link_params *params,
+	const u32 min_w_val)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 credit_upper_bound =
+	    bnx2x_ets_get_credit_upper_bound(min_w_val);
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0 :
+		NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4, credit_upper_bound);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
+		   NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
+
+	if (0 == port) {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
+			credit_upper_bound);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
+			credit_upper_bound);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8,
+			credit_upper_bound);
+	}
+}
+/******************************************************************************
+* Description:
+*	Returns the NIG ETS registers to their init values, except
+*	credit_upper_bound, which isn't used in this configuration
+*	(no WFQ is enabled) and is configured according to the spec.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
+					const struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 min_w_val = bnx2x_ets_get_min_w_val_nig(vars);
+	/**
+	 * Mapping between entry priority and client number (0,1,2 - debug
+	 * and management clients, 3 - COS0 client, 4 - COS1, ... 8 - COS5
+	 * (HIGHEST)), 4 bits per client.
+	 * TODO_ETS - Should be done by reset value or init tool
+	 */
+	if (port) {
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB, 0x543210);
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB, 0x0);
+	} else {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB, 0x76543210);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB, 0x8);
+	}
+	/**
+	 * For strict priority entries defines the number of consecutive
+	 * slots for the highest priority.
+	 */
+	/* TODO_ETS - Should be done by reset value or init tool */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
+		   NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/**
+	 * mapping between the CREDIT_WEIGHT registers and actual client
+	 * numbers
+	 */
+	/* TODO_ETS - Should be done by reset value or init tool */
+	if (port) {
+		/*Port 1 has 6 COS*/
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB, 0x210543);
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x0);
+	} else {
+		/*Port 0 has 9 COS*/
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+		       0x43210876);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB, 0x5);
+	}
+
+	/**
+	 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
+	 * as strict.  Bits 0,1,2 - debug and management entries, 3 -
+	 * COS0 entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 * bit4   bit3	  bit2   bit1	  bit0
+	 * MCP and debug are strict
+	 */
+	if (port)
+		REG_WR(bp, NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT, 0x3f);
+	else
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1ff);
+	/* defines which entries (clients) are subjected to WFQ arbitration */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+		   NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
+
+	/**
+	 * Note that the register addresses are not contiguous, so a for
+	 * loop is not appropriate here. In 2 port mode only COS0-5 can be
+	 * used on port0; in 4 port mode only COS0-2 can be used on port1.
+	 * DEBUG0, DEBUG1 and MGMT are never used for WFQ.
+	 */
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
+		   NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
+	if (0 == port) {
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
+	}
+
+	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val);
+}
+/******************************************************************************
+* Description:
+*	Set credit upper bound for PBF.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
+	const struct link_params *params,
+	const u32 min_w_val)
+{
+	struct bnx2x *bp = params->bp;
+	const u32 credit_upper_bound =
+	    bnx2x_ets_get_credit_upper_bound(min_w_val);
+	const u8 port = params->port;
+	u32 base_upper_bound = 0;
+	u8 max_cos = 0;
+	u8 i = 0;
+	/**
+	 * In 2 port mode port0 has COS0-5 that can be used for WFQ. In 4
+	 * port mode port1 has COS0-2 that can be used for WFQ.
+	 */
+	if (0 == port) {
+		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+	} else {
+		base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P1;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+	}
+
+	for (i = 0; i < max_cos; i++)
+		REG_WR(bp, base_upper_bound + (i << 2), credit_upper_bound);
+}
+
+/******************************************************************************
+* Description:
+*	Returns the PBF ETS registers to their init values, except
+*	credit_upper_bound, which isn't used in this configuration
+*	(no WFQ is enabled) and is configured according to the spec.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+	u8 i = 0;
+	u32 base_weight = 0;
+	u8 max_cos = 0;
+
+	/**
+	 * Mapping between entry priority and client number (0 - COS0
+	 * client, 1 - COS1, ... 5 - COS5 (HIGHEST)), 3 bits per client.
+	 * TODO_ETS - Should be done by reset value or init tool
+	 */
+	if (port)
+		/*  0x688 (|011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, 0x688);
+	else
+		/*  (10 1|100 |011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, 0x2C688);
+
+	/* TODO_ETS - Should be done by reset value or init tool */
+	if (port)
+		/* 0x688 (|011|0 10|00 1|000)*/
+		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1, 0x688);
+	else
+		/* 0x2C688 (10 1|100 |011|0 10|00 1|000) */
+		REG_WR(bp, PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0, 0x2C688);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1 :
+		   PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0, 0x100);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, 0);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+		   PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0, 0);
+	/**
+	 * In 2 port mode port0 has COS0-5 that can be used for WFQ.
+	 * In 4 port mode port1 has COS0-2 that can be used for WFQ.
+	 */
+	if (0 == port) {
+		base_weight = PBF_REG_COS0_WEIGHT_P0;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
+	} else {
+		base_weight = PBF_REG_COS0_WEIGHT_P1;
+		max_cos = DCBX_E3B0_MAX_NUM_COS_PORT1;
+	}
+
+	for (i = 0; i < max_cos; i++)
+		REG_WR(bp, base_weight + (0x4 * i), 0);
+
+	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+}
+/******************************************************************************
+* Description:
+*	E3B0 disable basically returns the registers to their init values.
+*.
+******************************************************************************/
+static int bnx2x_ets_e3b0_disabled(const struct link_params *params,
+				   const struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+
+	if (!CHIP_IS_E3B0(bp)) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0"
+				   "\n");
+		return -EINVAL;
+	}
+
+	bnx2x_ets_e3b0_nig_disabled(params, vars);
+
+	bnx2x_ets_e3b0_pbf_disabled(params);
+
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Disable basically returns the registers to their init values.
+*.
+******************************************************************************/
+int bnx2x_ets_disabled(struct link_params *params,
+		      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	int bnx2x_status = 0;
+
+	if ((CHIP_IS_E2(bp)) || (CHIP_IS_E3A0(bp)))
+		bnx2x_ets_e2e3a0_disabled(params);
+	else if (CHIP_IS_E3B0(bp))
+		bnx2x_status = bnx2x_ets_e3b0_disabled(params, vars);
+	else {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_disabled - chip not supported\n");
+		return -EINVAL;
+	}
+
+	return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+*	Set the COS mapping to SP and BW; up to this point none of the COS
+*	entries has been set as SP or BW.
+******************************************************************************/
+static int bnx2x_ets_e3b0_cli_map(const struct link_params *params,
+				  const struct bnx2x_ets_params *ets_params,
+				  const u8 cos_sp_bitmap,
+				  const u8 cos_bw_bitmap)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u8 nig_cli_sp_bitmap = 0x7 | (cos_sp_bitmap << 3);
+	const u8 pbf_cli_sp_bitmap = cos_sp_bitmap;
+	const u8 nig_cli_subject2wfq_bitmap = cos_bw_bitmap << 3;
+	const u8 pbf_cli_subject2wfq_bitmap = cos_bw_bitmap;
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, nig_cli_sp_bitmap);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0, pbf_cli_sp_bitmap);
+
+	REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ :
+	       NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
+	       nig_cli_subject2wfq_bitmap);
+
+	REG_WR(bp, (port) ? PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1 :
+	       PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0,
+	       pbf_cli_subject2wfq_bitmap);
+
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	This function is needed because the NIG ARB_CREDIT_WEIGHT_X registers
+*	are not contiguous, so ARB_CREDIT_WEIGHT_0 + offset is not suitable.
+******************************************************************************/
+static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
+				     const u8 cos_entry,
+				     const u32 min_w_val_nig,
+				     const u32 min_w_val_pbf,
+				     const u16 total_bw,
+				     const u8 bw,
+				     const u8 port)
+{
+	u32 nig_reg_adress_crd_weight = 0;
+	u32 pbf_reg_adress_crd_weight = 0;
+	/* Calculate and set BW for this COS*/
+	const u32 cos_bw_nig = (bw * min_w_val_nig) / total_bw;
+	const u32 cos_bw_pbf = (bw * min_w_val_pbf) / total_bw;
+
+	switch (cos_entry) {
+	case 0:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS0_WEIGHT_P1 : PBF_REG_COS0_WEIGHT_P0;
+		break;
+	case 1:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS1_WEIGHT_P1 : PBF_REG_COS1_WEIGHT_P0;
+		break;
+	case 2:
+		nig_reg_adress_crd_weight = (port) ?
+			NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2 :
+			NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2;
+		pbf_reg_adress_crd_weight = (port) ?
+			PBF_REG_COS2_WEIGHT_P1 : PBF_REG_COS2_WEIGHT_P0;
+		break;
+	case 3:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3;
+		pbf_reg_adress_crd_weight = PBF_REG_COS3_WEIGHT_P0;
+		break;
+	case 4:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4;
+		pbf_reg_adress_crd_weight = PBF_REG_COS4_WEIGHT_P0;
+		break;
+	case 5:
+		if (port)
+			return -EINVAL;
+		nig_reg_adress_crd_weight = NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5;
+		pbf_reg_adress_crd_weight = PBF_REG_COS5_WEIGHT_P0;
+		break;
+	}
+
+	REG_WR(bp, nig_reg_adress_crd_weight, cos_bw_nig);
+
+	REG_WR(bp, pbf_reg_adress_crd_weight, cos_bw_pbf);
+
+	return 0;
+}
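+/* For illustration (hypothetical inputs): with bw == 50, total_bw == 100 and
+ * min_w_val_nig == ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS (1360), the NIG credit
+ * weight programmed above is 50 * 1360 / 100 == 680.
+ */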
+/******************************************************************************
+* Description:
+*	Calculate the total BW. A value of 0 isn't legal.
+*.
+******************************************************************************/
+static int bnx2x_ets_e3b0_get_total_bw(
+	const struct link_params *params,
+	const struct bnx2x_ets_params *ets_params,
+	u16 *total_bw)
+{
+	struct bnx2x *bp = params->bp;
+	u8 cos_idx = 0;
+
+	*total_bw = 0;
+	/* Calculate total BW requested */
+	for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
+		if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+			if (0 == ets_params->cos[cos_idx].params.bw_params.bw) {
+				DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW "
+						   "was set to 0\n");
+				return -EINVAL;
+			}
+			*total_bw +=
+				ets_params->cos[cos_idx].params.bw_params.bw;
+		}
+	}
+
+	/* Check the total BW is valid */
+	if ((100 != *total_bw) || (0 == *total_bw)) {
+		if (0 == *total_bw) {
+			DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW "
+					   "shouldn't be 0\n");
+			return -EINVAL;
+		}
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config total BW should be "
+				   "100\n");
+		/*
+		 * We can handle a case where the BW isn't 100; this can
+		 * happen if the TCs are joined.
+		 */
+	}
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Invalidate all the sp_pri_to_cos.
+*.
+******************************************************************************/
+static void bnx2x_ets_e3b0_sp_pri_to_cos_init(u8 *sp_pri_to_cos)
+{
+	u8 pri = 0;
+	for (pri = 0; pri < DCBX_MAX_NUM_COS; pri++)
+		sp_pri_to_cos[pri] = DCBX_INVALID_COS;
+}
+/******************************************************************************
+* Description:
+*	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+*	according to sp_pri_to_cos.
+*.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
+					    u8 *sp_pri_to_cos, const u8 pri,
+					    const u8 cos_entry)
+{
+	struct bnx2x *bp = params->bp;
+	const u8 port = params->port;
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+	if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
+				   "parameter There can't be two COS's with"
+				   "the same strict pri\n");
+		return -EINVAL;
+	}
+
+	if (pri > max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid"
+			       "parameter Illegal strict priority\n");
+	    return -EINVAL;
+	}
+
+	sp_pri_to_cos[pri] = cos_entry;
+	return 0;
+
+}
+
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in
+*	the sp_pri_cli register.
+*.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg(const u8 cos, const u8 cos_offset,
+					 const u8 pri_set,
+					 const u8 pri_offset,
+					 const u8 entry_size)
+{
+	u64 pri_cli_nig = 0;
+	pri_cli_nig = ((u64)(cos + cos_offset)) << (entry_size *
+						    (pri_set + pri_offset));
+
+	return pri_cli_nig;
+}
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in the
+*	sp_pri_cli register for NIG.
+*.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_nig(const u8 cos, const u8 pri_set)
+{
+	/* MCP, dbg0 and dbg1 always have the higher strict priorities */
+	const u8 nig_cos_offset = 3;
+	const u8 nig_pri_offset = 3;
+
+	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, nig_cos_offset, pri_set,
+		nig_pri_offset, 4);
+
+}
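+/* For illustration (hypothetical arguments):
+ * bnx2x_e3b0_sp_get_pri_cli_reg_nig(2, 0) returns
+ * (2 + 3) << (4 * (0 + 3)) == 0x5000, i.e. COS2 (client 5) placed in the
+ * first strict-priority entry above MCP/dbg0/dbg1.
+ */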
+/******************************************************************************
+* Description:
+*	Returns the correct value according to COS and priority in the
+*	sp_pri_cli register for PBF.
+*.
+******************************************************************************/
+static u64 bnx2x_e3b0_sp_get_pri_cli_reg_pbf(const u8 cos, const u8 pri_set)
+{
+	const u8 pbf_cos_offset = 0;
+	const u8 pbf_pri_offset = 0;
+
+	return bnx2x_e3b0_sp_get_pri_cli_reg(cos, pbf_cos_offset, pri_set,
+		pbf_pri_offset, 3);
+
+}
+
+/******************************************************************************
+* Description:
+*	Calculate and set the SP (ARB_PRIORITY_CLIENT) NIG and PBF registers
+*	according to sp_pri_to_cos.(which COS has higher priority)
+*.
+******************************************************************************/
+static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
+					     u8 *sp_pri_to_cos)
+{
+	struct bnx2x *bp = params->bp;
+	u8 i = 0;
+	const u8 port = params->port;
+	/* MCP, dbg0 and dbg1 always have the higher strict priorities */
+	u64 pri_cli_nig = 0x210;
+	u32 pri_cli_pbf = 0x0;
+	u8 pri_set = 0;
+	u8 pri_bitmask = 0;
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+
+	u8 cos_bit_to_set = (1 << max_num_of_cos) - 1;
+
+	/* Set all the strict priority first */
+	for (i = 0; i < max_num_of_cos; i++) {
+		if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
+			if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+				DP(NETIF_MSG_LINK,
+					   "bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+					   "invalid cos entry\n");
+				return -EINVAL;
+			}
+
+			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+			    sp_pri_to_cos[i], pri_set);
+
+			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+			    sp_pri_to_cos[i], pri_set);
+			pri_bitmask = 1 << sp_pri_to_cos[i];
+			/* Each COS may be used for SP only once */
+			if (0 == (pri_bitmask & cos_bit_to_set)) {
+				DP(NETIF_MSG_LINK,
+					"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
+					"invalid: there can't be two COS's "
+					"with the same strict pri\n");
+				return -EINVAL;
+			}
+			/* COS is used; remove it from the bitmap */
+			cos_bit_to_set &= ~pri_bitmask;
+			pri_set++;
+		}
+	}
+
+	/* Set all the non-strict priority entries (entry i = COS i) */
+	for (i = 0; i < max_num_of_cos; i++) {
+		pri_bitmask = 1 << i;
+		/* Check if COS was already used for SP */
+		if (pri_bitmask & cos_bit_to_set) {
+			/* COS wasn't used for SP */
+			pri_cli_nig |= bnx2x_e3b0_sp_get_pri_cli_reg_nig(
+			    i, pri_set);
+
+			pri_cli_pbf |= bnx2x_e3b0_sp_get_pri_cli_reg_pbf(
+			    i, pri_set);
+			/* COS is used; remove it from the bitmap */
+			cos_bit_to_set &= ~pri_bitmask;
+			pri_set++;
+		}
+	}
+
+	if (pri_set != max_num_of_cos) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_set_pri_cli_reg not all "
+				   "entries were set\n");
+		return -EINVAL;
+	}
+
+	if (port) {
+		/* Only 6 usable clients*/
+		REG_WR(bp, NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB,
+		       (u32)pri_cli_nig);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1, pri_cli_pbf);
+	} else {
+		/* Only 9 usable clients*/
+		const u32 pri_cli_nig_lsb = (u32) (pri_cli_nig);
+		const u32 pri_cli_nig_msb = (u32) ((pri_cli_nig >> 32) & 0xF);
+
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB,
+		       pri_cli_nig_lsb);
+		REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB,
+		       pri_cli_nig_msb);
+
+		REG_WR(bp, PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0, pri_cli_pbf);
+	}
+	return 0;
+}
+
+/******************************************************************************
+* Description:
+*	Configure the COS to ETS according to BW and SP settings.
+******************************************************************************/
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+			 const struct link_vars *vars,
+			 const struct bnx2x_ets_params *ets_params)
+{
+	struct bnx2x *bp = params->bp;
+	int bnx2x_status = 0;
+	const u8 port = params->port;
+	u16 total_bw = 0;
+	const u32 min_w_val_nig = bnx2x_ets_get_min_w_val_nig(vars);
+	const u32 min_w_val_pbf = ETS_E3B0_PBF_MIN_W_VAL;
+	u8 cos_bw_bitmap = 0;
+	u8 cos_sp_bitmap = 0;
+	u8 sp_pri_to_cos[DCBX_MAX_NUM_COS] = {0};
+	const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
+		DCBX_E3B0_MAX_NUM_COS_PORT0;
+	u8 cos_entry = 0;
+
+	if (!CHIP_IS_E3B0(bp)) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_disabled the chip isn't E3B0"
+				   "\n");
+		return -EINVAL;
+	}
+
+	if ((ets_params->num_of_cos > max_num_of_cos)) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config the number of COS "
+				   "isn't supported\n");
+		return -EINVAL;
+	}
+
+	/* Prepare sp strict priority parameters*/
+	bnx2x_ets_e3b0_sp_pri_to_cos_init(sp_pri_to_cos);
+
+	/* Prepare BW parameters*/
+	bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
+						   &total_bw);
+	if (0 != bnx2x_status) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config get_total_bw failed "
+				   "\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Upper bound is set according to current link speed (min_w_val
+	 * should be the same for upper bound and COS credit val).
+	 */
+	bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
+	bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
+
+	for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
+		if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
+			cos_bw_bitmap |= (1 << cos_entry);
+			/*
+			 * The function also sets the BW in HW (not the
+			 * mapping yet).
+			 */
+			bnx2x_status = bnx2x_ets_e3b0_set_cos_bw(
+				bp, cos_entry, min_w_val_nig, min_w_val_pbf,
+				total_bw,
+				ets_params->cos[cos_entry].params.bw_params.bw,
+				port);
+		} else if (bnx2x_cos_state_strict ==
+			   ets_params->cos[cos_entry].state) {
+			cos_sp_bitmap |= (1 << cos_entry);
+
+			bnx2x_status = bnx2x_ets_e3b0_sp_pri_to_cos_set(
+				params,
+				sp_pri_to_cos,
+				ets_params->cos[cos_entry].params.sp_params.pri,
+				cos_entry);
+
+		} else {
+			DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config cos state not"
+					   " valid\n");
+			return -EINVAL;
+		}
+		if (0 != bnx2x_status) {
+			DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_config set cos bw "
+					   "failed\n");
+			return bnx2x_status;
+		}
+	}
+
+	/* Set SP register (which COS has higher priority) */
+	bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
+							 sp_pri_to_cos);
+
+	if (0 != bnx2x_status) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config set_pri_cli_reg "
+				   "failed\n");
+		return bnx2x_status;
+	}
+
+	/* Set client mapping of BW and strict */
+	bnx2x_status = bnx2x_ets_e3b0_cli_map(params, ets_params,
+					      cos_sp_bitmap,
+					      cos_bw_bitmap);
+
+	if (0 != bnx2x_status) {
+		DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
+		return bnx2x_status;
+	}
+	return 0;
+}
+
+static void bnx2x_ets_bw_limit_common(const struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+	/*
+	 * defines which entries (clients) are subjected to WFQ arbitration
+	 * COS0 0x8
+	 * COS1 0x10
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
+	/*
+	 * mapping between the ARB_CREDIT_WEIGHT registers and actual
+	 * client numbers (WEIGHT_0 does not actually have to represent
+	 * client 0)
+	 *    PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 *  cos1-001     cos0-000     dbg1-100     dbg0-011     MCP-010
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+
+	/* ETS mode enabled*/
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
+
+	/* Defines the number of consecutive slots for the strict priority */
+	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
+	/*
+	 * 5-bit bitmap; each bit specifies whether the entry behaves as
+	 * strict. Bits 0,1,2 - debug and management entries, 3 - COS0
+	 * entry, 4 - COS1 entry.
+	 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 * bit4   bit3    bit2     bit1    bit0
+	 * MCP and debug are strict
+	 */
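+	/* 0x7 = bits 0-2: only MGMT and the two debug entries are strict
+	 * here; COS0/COS1 remain subject to WFQ arbitration.
+	 */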
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
+
+	/* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
+	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
+	       ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
+}
+
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+			const u32 cos1_bw)
+{
+	struct bnx2x *bp = params->bp;
+	const u32 total_bw = cos0_bw + cos1_bw;
+	u32 cos0_credit_weight = 0;
+	u32 cos1_credit_weight = 0;
+
+	DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
+
+	if ((0 == total_bw) ||
+	    (0 == cos0_bw) ||
+	    (0 == cos1_bw)) {
+		DP(NETIF_MSG_LINK, "Total BW and per-COS BW can't be zero\n");
+		return;
+	}
+
+	cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+		total_bw;
+	cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
+		total_bw;
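+	/*
+	 * For example, cos0_bw = 25 and cos1_bw = 75 yield weights of 1/4
+	 * and 3/4 of ETS_BW_LIMIT_CREDIT_WEIGHT (integer division, so the
+	 * results truncate).
+	 */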
+
+	bnx2x_ets_bw_limit_common(params);
+
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
+
+	REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
+	REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
+}
+
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
+{
+	struct bnx2x *bp = params->bp;
+	u32 val	= 0;
+
+	DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
+	/*
+	 * 5-bit bitmap; each bit specifies whether the entry behaves as
+	 * strict. Bits 0,1,2 - debug and management entries,
+	 * 3 - COS0 entry, 4 - COS1 entry.
+	 *  COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
+	 *  bit4   bit3    bit2     bit1    bit0
+	 * MCP and debug are strict
+	 */
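+	/* 0x1F sets all five entries - MGMT, both debug clients, COS0 and
+	 * COS1 - to strict arbitration.
+	 */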
+	REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
+	/*
+	 * For strict priority entries defines the number of consecutive slots
+	 * for the highest priority.
+	 */
+	REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
+	/* ETS mode disable */
+	REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
+	/* Defines the number of consecutive slots for the strict priority */
+	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
+
+	/* Defines the number of consecutive slots for the strict priority */
+	REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
+
+	/*
+	 * Mapping between entry priority and client number: 0,1,2 - debug
+	 * and management clients, 3 - COS0 client, 4 - COS1 client
+	 * (PRI4 is the highest). 3 bits per client:
+	 *   PRI4    |    PRI3    |    PRI2    |    PRI1    |    PRI0
+	 * dbg0-010     dbg1-001     cos1-100     cos0-011     MCP-000
+	 * dbg0-010     dbg1-001     cos0-011     cos1-100     MCP-000
+	 */
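+	/*
+	 * Packed into 3-bit fields (PRI4..PRI0) the two rows give exactly
+	 * the constants used: 010 001 100 011 000b = 0x2318 and
+	 * 010 001 011 100 000b = 0x22E0.
+	 */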
+	val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+	REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
+
+	return 0;
+}
+
+/******************************************************************/
+/*			PFC section				  */
+/******************************************************************/
+
+static void bnx2x_update_pfc_xmac(struct link_params *params,
+				  struct link_vars *vars,
+				  u8 is_lb)
+{
+	struct bnx2x *bp = params->bp;
+	u32 xmac_base;
+	u32 pause_val, pfc0_val, pfc1_val;
+
+	/* XMAC base address */
+	xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	/* Initialize pause and pfc registers */
+	pause_val = 0x18000;
+	pfc0_val = 0xFFFF8000;
+	pfc1_val = 0x2;
+
+	/* No PFC support */
+	if (!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) {
+
+		/*
+		 * RX flow control - process pause frames in the receive
+		 * direction.
+		 */
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+			pause_val |= XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN;
+
+		/*
+		 * TX flow control - send pause frames when the buffer is
+		 * full.
+		 */
+		if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+			pause_val |= XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN;
+	} else { /* PFC support */
+		pfc1_val |= XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN |
+			XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN |
+			XMAC_PFC_CTRL_HI_REG_RX_PFC_EN |
+			XMAC_PFC_CTRL_HI_REG_TX_PFC_EN;
+	}
+
+	/* Write pause and PFC registers */
+	REG_WR(bp, xmac_base + XMAC_REG_PAUSE_CTRL, pause_val);
+	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL, pfc0_val);
+	REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI, pfc1_val);
+
+	/* Set MAC address for source TX Pause/PFC frames */
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+	       ((params->mac_addr[2] << 24) |
+		(params->mac_addr[3] << 16) |
+		(params->mac_addr[4] << 8) |
+		(params->mac_addr[5])));
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+	       ((params->mac_addr[0] << 8) |
+		(params->mac_addr[1])));
+
+	udelay(30);
+}
+
+static void bnx2x_emac_get_pfc_stat(struct link_params *params,
+				    u32 pfc_frames_sent[2],
+				    u32 pfc_frames_received[2])
+{
+	/* Read pfc statistic */
+	struct bnx2x *bp = params->bp;
+	u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	u32 val_xon = 0;
+	u32 val_xoff = 0;
+
+	DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
+
+	/* PFC received frames */
+	val_xoff = REG_RD(bp, emac_base +
+				EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
+	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
+	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
+	val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
+
+	pfc_frames_received[0] = val_xon + val_xoff;
+
+	/* PFC frames sent */
+	val_xoff = REG_RD(bp, emac_base +
+				EMAC_REG_RX_PFC_STATS_XOFF_SENT);
+	val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
+	val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
+	val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
+
+	pfc_frames_sent[0] = val_xon + val_xoff;
+}
+
+/* Read PFC statistics */
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+			 u32 pfc_frames_sent[2],
+			 u32 pfc_frames_received[2])
+{
+	struct bnx2x *bp = params->bp;
+
+	DP(NETIF_MSG_LINK, "pfc statistic\n");
+
+	if (!vars->link_up)
+		return;
+
+	if (MAC_TYPE_EMAC == vars->mac_type) {
+		DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
+		bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
+					pfc_frames_received);
+	}
+}
+
+/******************************************************************/
+/*			MAC/PBF section				  */
+/******************************************************************/
+static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
+{
+	u32 mode, emac_base;
+	/*
+	 * Set clause 45 mode, slow down the MDIO clock to 2.5MHz
+	 * (a value of 49 == 0x31) and make sure that the AUTO poll is off.
+	 */
+
+	if (CHIP_IS_E2(bp))
+		emac_base = GRCBASE_EMAC0;
+	else
+		emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	mode = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
+	mode &= ~(EMAC_MDIO_MODE_AUTO_POLL |
+		  EMAC_MDIO_MODE_CLOCK_CNT);
+	if (USES_WARPCORE(bp))
+		mode |= (74L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
+	else
+		mode |= (49L << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
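+	/*
+	 * Assuming the MDIO clock is the EMAC core clock divided by about
+	 * (CLOCK_CNT + 1) from a ~125MHz source, a count of 49 gives
+	 * roughly 2.5MHz; Warpcore devices use 74 for a slower clock.
+	 * (The divider semantics here are an inference, not taken from a
+	 * datasheet.)
+	 */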
+
+	mode |= (EMAC_MDIO_MODE_CLAUSE_45);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE, mode);
+
+	udelay(40);
+}
+
+static void bnx2x_emac_init(struct link_params *params,
+			    struct link_vars *vars)
+{
+	/* reset and unreset the emac core */
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	u32 val;
+	u16 timeout;
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+	udelay(5);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
+
+	/* init emac - use read-modify-write */
+	/* self clear reset */
+	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+	EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
+
+	timeout = 200;
+	do {
+		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+		DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
+		if (!timeout) {
+			DP(NETIF_MSG_LINK, "EMAC timeout!\n");
+			return;
+		}
+		timeout--;
+	} while (val & EMAC_MODE_RESET);
+	bnx2x_set_mdio_clk(bp, params->chip_id, port);
+	/* Set mac address */
+	val = ((params->mac_addr[0] << 8) |
+		params->mac_addr[1]);
+	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH, val);
+
+	val = ((params->mac_addr[2] << 24) |
+	       (params->mac_addr[3] << 16) |
+	       (params->mac_addr[4] << 8) |
+		params->mac_addr[5]);
+	EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + 4, val);
+}
+
+static void bnx2x_set_xumac_nig(struct link_params *params,
+				u16 tx_pause_en,
+				u8 enable)
+{
+	struct bnx2x *bp = params->bp;
+
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_IN_EN : NIG_REG_P0_MAC_IN_EN,
+	       enable);
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_OUT_EN : NIG_REG_P0_MAC_OUT_EN,
+	       enable);
+	REG_WR(bp, params->port ? NIG_REG_P1_MAC_PAUSE_OUT_EN :
+	       NIG_REG_P0_MAC_PAUSE_OUT_EN, tx_pause_en);
+}
+
+static void bnx2x_umac_enable(struct link_params *params,
+			    struct link_vars *vars, u8 lb)
+{
+	u32 val;
+	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+	struct bnx2x *bp = params->bp;
+	/* Reset UMAC */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+	usleep_range(1000, 1000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port));
+
+	DP(NETIF_MSG_LINK, "enabling UMAC\n");
+
+	/*
+	 * This register determines on which events the MAC will assert
+	 * error on the i/f to the NIG along w/ EOP.
+	 */
+
+	/*
+	 * BD REG_WR(bp, NIG_REG_P0_MAC_RSV_ERR_MASK +
+	 * params->port*0x14, 0xfffff)
+	 */
+	/* This register opens the gate for the UMAC despite its name */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	val = UMAC_COMMAND_CONFIG_REG_PROMIS_EN |
+		UMAC_COMMAND_CONFIG_REG_PAD_EN |
+		UMAC_COMMAND_CONFIG_REG_SW_RESET |
+		UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK;
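+	/* Bits [3:2] of COMMAND_CONFIG select the line speed:
+	 * 0 - 10M, 1 - 100M, 2 - 1G, 3 - 2.5G (see the shifts below).
+	 */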
+	switch (vars->line_speed) {
+	case SPEED_10:
+		val |= (0<<2);
+		break;
+	case SPEED_100:
+		val |= (1<<2);
+		break;
+	case SPEED_1000:
+		val |= (2<<2);
+		break;
+	case SPEED_2500:
+		val |= (3<<2);
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid speed for UMAC %d\n",
+			       vars->line_speed);
+		break;
+	}
+	if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val |= UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE;
+
+	if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+		val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+	udelay(50);
+
+	/* Set MAC address for source TX Pause/PFC frames (under SW reset) */
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+	       ((params->mac_addr[2] << 24) |
+		(params->mac_addr[3] << 16) |
+		(params->mac_addr[4] << 8) |
+		(params->mac_addr[5])));
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+	       ((params->mac_addr[0] << 8) |
+		(params->mac_addr[1])));
+
+	/* Enable RX and TX */
+	val &= ~UMAC_COMMAND_CONFIG_REG_PAD_EN;
+	val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |
+		UMAC_COMMAND_CONFIG_REG_RX_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+	udelay(50);
+
+	/* Remove SW Reset */
+	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;
+
+	/* Check loopback mode */
+	if (lb)
+		val |= UMAC_COMMAND_CONFIG_REG_LOOP_ENA;
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+
+	/*
+	 * Maximum Frame Length (RW). Defines a 14-Bit maximum frame
+	 * length used by the MAC receive logic to check frames.
+	 */
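+	/* 0x2710 is 10000 decimal, comfortably above jumbo frame size */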
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+	bnx2x_set_xumac_nig(params,
+			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+	vars->mac_type = MAC_TYPE_UMAC;
+	vars->mac_type = MAC_TYPE_UMAC;
+}
+
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+	u32 port4mode_ovwr_val;
+	/* Check 4-port override enabled */
+	port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+	if (port4mode_ovwr_val & (1<<0)) {
+		/* Return 4-port mode override value */
+		return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+	}
+	/* Return 4-port mode from input pin */
+	return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
+
+/* Define the XMAC mode */
+static void bnx2x_xmac_init(struct bnx2x *bp, u32 max_speed)
+{
+	u32 is_port4mode = bnx2x_is_4_port_mode(bp);
+
+	/*
+	 * In 4-port mode, the mode needs to be set only once, so if the
+	 * XMAC is already out of reset, the mode has already been set and
+	 * the XMAC must not be reset again, since it controls both ports
+	 * of the path.
+	 */
+
+	if (is_port4mode && (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	     MISC_REGISTERS_RESET_REG_2_XMAC)) {
+		DP(NETIF_MSG_LINK, "XMAC already out of reset"
+				   " in 4-port mode\n");
+		return;
+	}
+
+	/* Hard reset */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       MISC_REGISTERS_RESET_REG_2_XMAC);
+	usleep_range(1000, 1000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       MISC_REGISTERS_RESET_REG_2_XMAC);
+	if (is_port4mode) {
+		DP(NETIF_MSG_LINK, "Init XMAC to 2 ports x 10G per path\n");
+
+		/* Set the number of ports on the system side to up to 2 */
+		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 1);
+
+		/* Set the number of ports on the Warp Core to 10G */
+		REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+	} else {
+		/* Set the number of ports on the system side to 1 */
+		REG_WR(bp, MISC_REG_XMAC_CORE_PORT_MODE, 0);
+		if (max_speed == SPEED_10000) {
+			DP(NETIF_MSG_LINK, "Init XMAC to 10G x 1"
+					   " port per path\n");
+			/* Set the number of ports on the Warp Core to 10G */
+			REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 3);
+		} else {
+			DP(NETIF_MSG_LINK, "Init XMAC to 20G x 2 ports"
+					   " per path\n");
+			/* Set the number of ports on the Warp Core to 20G */
+			REG_WR(bp, MISC_REG_XMAC_PHY_PORT_MODE, 1);
+		}
+	}
+	/* Soft reset */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+	usleep_range(1000, 1000);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       MISC_REGISTERS_RESET_REG_2_XMAC_SOFT);
+}
+
+static void bnx2x_xmac_disable(struct link_params *params)
+{
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 pfc_ctrl, xmac_base = (port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	    MISC_REGISTERS_RESET_REG_2_XMAC) {
+		/*
+		 * Send an indication to change the state in the NIG back to
+		 * XON. Clearing this bit enables the next setting of it to
+		 * produce a rising edge.
+		 */
+		pfc_ctrl = REG_RD(bp, xmac_base + XMAC_REG_PFC_CTRL_HI);
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+		       (pfc_ctrl & ~(1<<1)));
+		REG_WR(bp, xmac_base + XMAC_REG_PFC_CTRL_HI,
+		       (pfc_ctrl | (1<<1)));
+		DP(NETIF_MSG_LINK, "Disable XMAC on port %x\n", port);
+		REG_WR(bp, xmac_base + XMAC_REG_CTRL, 0);
+		usleep_range(1000, 1000);
+		bnx2x_set_xumac_nig(params, 0, 0);
+		REG_WR(bp, xmac_base + XMAC_REG_CTRL,
+		       XMAC_CTRL_REG_SOFT_RESET);
+	}
+}
+
+static int bnx2x_xmac_enable(struct link_params *params,
+			     struct link_vars *vars, u8 lb)
+{
+	u32 val, xmac_base;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "enabling XMAC\n");
+
+	xmac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+	bnx2x_xmac_init(bp, vars->line_speed);
+
+	/*
+	 * This register tells the NIG whether to send traffic to UMAC
+	 * or XMAC
+	 */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 0);
+
+	/* Set Max packet size */
+	REG_WR(bp, xmac_base + XMAC_REG_RX_MAX_SIZE, 0x2710);
+
+	/* CRC append for Tx packets */
+	REG_WR(bp, xmac_base + XMAC_REG_TX_CTRL, 0xC800);
+
+	/* update PFC */
+	bnx2x_update_pfc_xmac(params, vars, 0);
+
+	/* Enable TX and RX */
+	val = XMAC_CTRL_REG_TX_EN | XMAC_CTRL_REG_RX_EN;
+
+	/* Check loopback mode */
+	if (lb)
+		val |= XMAC_CTRL_REG_CORE_LOCAL_LPBK;
+	REG_WR(bp, xmac_base + XMAC_REG_CTRL, val);
+	bnx2x_set_xumac_nig(params,
+			    ((vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) != 0), 1);
+
+	vars->mac_type = MAC_TYPE_XMAC;
+
+	return 0;
+}
+
+static int bnx2x_emac_enable(struct link_params *params,
+			     struct link_vars *vars, u8 lb)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	u32 val;
+
+	DP(NETIF_MSG_LINK, "enabling EMAC\n");
+
+	/* Disable BMAC */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* enable emac and not bmac */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
+
+	/* ASIC */
+	if (vars->phy_flags & PHY_XGXS_FLAG) {
+		u32 ser_lane = ((params->lane_config &
+				 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+				PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+		DP(NETIF_MSG_LINK, "XGXS\n");
+		/* select the master lanes (out of 0-3) */
+		REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, ser_lane);
+		/* select XGXS */
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+	} else { /* SerDes */
+		DP(NETIF_MSG_LINK, "SerDes\n");
+		/* select SerDes */
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
+	}
+
+	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+		      EMAC_RX_MODE_RESET);
+	bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+		      EMAC_TX_MODE_RESET);
+
+	if (CHIP_REV_IS_SLOW(bp)) {
+		/* config GMII mode */
+		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+		EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
+	} else { /* ASIC */
+		/* pause enable/disable */
+		bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
+			       EMAC_RX_MODE_FLOW_EN);
+
+		bnx2x_bits_dis(bp,  emac_base + EMAC_REG_EMAC_TX_MODE,
+			       (EMAC_TX_MODE_EXT_PAUSE_EN |
+				EMAC_TX_MODE_FLOW_EN));
+		if (!(params->feature_config_flags &
+		      FEATURE_CONFIG_PFC_ENABLED)) {
+			if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+				bnx2x_bits_en(bp, emac_base +
+					      EMAC_REG_EMAC_RX_MODE,
+					      EMAC_RX_MODE_FLOW_EN);
+
+			if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+				bnx2x_bits_en(bp, emac_base +
+					      EMAC_REG_EMAC_TX_MODE,
+					      (EMAC_TX_MODE_EXT_PAUSE_EN |
+					       EMAC_TX_MODE_FLOW_EN));
+		} else {
+			bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
+				      EMAC_TX_MODE_FLOW_EN);
+		}
+	}
+
+	/* KEEP_VLAN_TAG, promiscuous */
+	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
+	val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
+
+	/*
+	 * Setting this bit causes MAC control frames (except for pause
+	 * frames) to be passed on for processing. This setting has no
+	 * effect on the operation of the pause frames. This bit affects
+	 * all packets regardless of RX Parser packet sorting logic.
+	 * Turn the PFC off to make sure we are in Xon state before
+	 * enabling it.
+	 */
+	EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+		DP(NETIF_MSG_LINK, "PFC is enabled\n");
+		/* Enable PFC again */
+		EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
+			EMAC_REG_RX_PFC_MODE_RX_EN |
+			EMAC_REG_RX_PFC_MODE_TX_EN |
+			EMAC_REG_RX_PFC_MODE_PRIORITIES);
+
+		EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
+			((0x0101 <<
+			  EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
+			 (0x00ff <<
+			  EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
+		val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
+	}
+	EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
+
+	/* Set Loopback */
+	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
+	if (lb)
+		val |= 0x810;
+	else
+		val &= ~0x810;
+	EMAC_WR(bp, EMAC_REG_EMAC_MODE, val);
+
+	/* enable emac */
+	REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1);
+
+	/* enable emac for jumbo packets */
+	EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE,
+		(EMAC_RX_MTU_SIZE_JUMBO_ENA |
+		 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)));
+
+	/* strip CRC */
+	REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
+
+	/* disable the NIG in/out to the bmac */
+	REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
+
+	/* enable the NIG in/out to the emac */
+	REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
+	val = 0;
+	if ((params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED) ||
+	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val = 1;
+
+	REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
+
+	REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x0);
+
+	vars->mac_type = MAC_TYPE_EMAC;
+	return 0;
+}
+
+static void bnx2x_update_pfc_bmac1(struct link_params *params,
+				   struct link_vars *vars)
+{
+	u32 wb_data[2];
+	struct bnx2x *bp = params->bp;
+	u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+		NIG_REG_INGRESS_BMAC0_MEM;
+	u32 val = 0x14;
+
+	if ((!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) &&
+		(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+		/* Enable BigMAC to react on received Pause packets */
+		val |= (1<<5);
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
+
+	/* tx control */
+	val = 0xc0;
+	if (!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED) &&
+		(vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val |= 0x800000;
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
+}
+
+static void bnx2x_update_pfc_bmac2(struct link_params *params,
+				   struct link_vars *vars,
+				   u8 is_lb)
+{
+	/*
+	 * Set rx control: Strip CRC and enable BigMAC to relay
+	 * control packets to the system as well
+	 */
+	u32 wb_data[2];
+	struct bnx2x *bp = params->bp;
+	u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+		NIG_REG_INGRESS_BMAC0_MEM;
+	u32 val = 0x14;
+
+	if ((!(params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED)) &&
+		(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
+		/* Enable BigMAC to react on received Pause packets */
+		val |= (1<<5);
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_CONTROL, wb_data, 2);
+	udelay(30);
+
+	/* Tx control */
+	val = 0xc0;
+	if (!(params->feature_config_flags &
+				FEATURE_CONFIG_PFC_ENABLED) &&
+	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val |= 0x800000;
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
+
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
+		DP(NETIF_MSG_LINK, "PFC is enabled\n");
+		/* Enable PFC RX & TX & STATS and set 8 COS  */
+		wb_data[0] = 0x0;
+		wb_data[0] |= (1<<0);  /* RX */
+		wb_data[0] |= (1<<1);  /* TX */
+		wb_data[0] |= (1<<2);  /* Force initial Xon */
+		wb_data[0] |= (1<<3);  /* 8 cos */
+		wb_data[0] |= (1<<5);  /* STATS */
+		wb_data[1] = 0;
+		REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
+			    wb_data, 2);
+		/* Clear the force Xon */
+		wb_data[0] &= ~(1<<2);
+	} else {
+		DP(NETIF_MSG_LINK, "PFC is disabled\n");
+		/* disable PFC RX & TX & STATS and set 8 COS */
+		wb_data[0] = 0x8;
+		wb_data[1] = 0;
+	}
+
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
+
+	/*
+	 * Set the time (base unit is 512 bit times) between automatic
+	 * re-sending of PP packets, and enable automatic re-send of
+	 * Per-Priority Packets as long as pp_gen is asserted and
+	 * pp_disable is low.
+	 */
+	val = 0x8000;
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		val |= (1<<16); /* enable automatic re-send */
+
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
+		    wb_data, 2);
+
+	/* mac control */
+	val = 0x3; /* Enable RX and TX */
+	if (is_lb) {
+		val |= 0x4; /* Local loopback */
+		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+	}
+	/* When PFC enabled, Pass pause frames towards the NIG. */
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		val |= ((1<<6)|(1<<5));
+
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+}
+
+/* PFC BRB internal port configuration params */
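+/*
+ * Thresholds are in BRB free-block units: the *_xoff fields are the
+ * levels below which pause/full is asserted and the *_xon fields the
+ * levels above which it is de-asserted (see bnx2x_update_pfc_brb).
+ */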
+struct bnx2x_pfc_brb_threshold_val {
+	u32 pause_xoff;
+	u32 pause_xon;
+	u32 full_xoff;
+	u32 full_xon;
+};
+
+struct bnx2x_pfc_brb_e3b0_val {
+	u32 full_lb_xoff_th;
+	u32 full_lb_xon_threshold;
+	u32 lb_guarantied;
+	u32 mac_0_class_t_guarantied;
+	u32 mac_0_class_t_guarantied_hyst;
+	u32 mac_1_class_t_guarantied;
+	u32 mac_1_class_t_guarantied_hyst;
+};
+
+struct bnx2x_pfc_brb_th_val {
+	struct bnx2x_pfc_brb_threshold_val pauseable_th;
+	struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+};
+
+static int bnx2x_pfc_brb_get_config_params(
+				struct link_params *params,
+				struct bnx2x_pfc_brb_th_val *config_val)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+	if (CHIP_IS_E2(bp)) {
+		config_val->pauseable_th.pause_xoff =
+		    PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+		config_val->pauseable_th.pause_xon =
+		    PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+		config_val->pauseable_th.full_xoff =
+		    PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+		config_val->pauseable_th.full_xon =
+		    PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+		/* non-pauseable */
+		config_val->non_pauseable_th.pause_xoff =
+		    PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.pause_xon =
+		    PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xoff =
+		    PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xon =
+		    PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+	} else if (CHIP_IS_E3A0(bp)) {
+		config_val->pauseable_th.pause_xoff =
+		    PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+		config_val->pauseable_th.pause_xon =
+		    PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+		config_val->pauseable_th.full_xoff =
+		    PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+		config_val->pauseable_th.full_xon =
+		    PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+		/* non-pauseable */
+		config_val->non_pauseable_th.pause_xoff =
+		    PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.pause_xon =
+		    PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xoff =
+		    PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+		config_val->non_pauseable_th.full_xon =
+		    PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+	} else if (CHIP_IS_E3B0(bp)) {
+		if (params->phy[INT_PHY].flags &
+		    FLAGS_4_PORT_MODE) {
+			config_val->pauseable_th.pause_xoff =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+			config_val->pauseable_th.pause_xon =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+			config_val->pauseable_th.full_xoff =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+			config_val->pauseable_th.full_xon =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+			/* non-pauseable */
+			config_val->non_pauseable_th.pause_xoff =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.pause_xon =
+			    PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xoff =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xon =
+			    PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+		} else {
+			config_val->pauseable_th.pause_xoff =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+			config_val->pauseable_th.pause_xon =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+			config_val->pauseable_th.full_xoff =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+			config_val->pauseable_th.full_xon =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+			/* non-pauseable */
+			config_val->non_pauseable_th.pause_xoff =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.pause_xon =
+			    PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xoff =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+			config_val->non_pauseable_th.full_xon =
+			    PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+		}
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
+						 struct bnx2x_pfc_brb_e3b0_val
+						 *e3b0_val,
+						 u32 cos0_pauseable,
+						 u32 cos1_pauseable)
+{
+	if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+		e3b0_val->full_lb_xoff_th =
+		    PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+		e3b0_val->full_lb_xon_threshold =
+		    PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+		e3b0_val->lb_guarantied =
+		    PFC_E3B0_4P_LB_GUART;
+		e3b0_val->mac_0_class_t_guarantied =
+		    PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+		e3b0_val->mac_0_class_t_guarantied_hyst =
+		    PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+		e3b0_val->mac_1_class_t_guarantied =
+		    PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+		e3b0_val->mac_1_class_t_guarantied_hyst =
+		    PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+	} else {
+		e3b0_val->full_lb_xoff_th =
+		    PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+		e3b0_val->full_lb_xon_threshold =
+		    PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+		e3b0_val->mac_0_class_t_guarantied_hyst =
+		    PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+		e3b0_val->mac_1_class_t_guarantied =
+		    PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+		e3b0_val->mac_1_class_t_guarantied_hyst =
+		    PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+		if (cos0_pauseable != cos1_pauseable) {
+			/* non-pauseable = lossy, pauseable = lossless */
+			e3b0_val->lb_guarantied =
+			    PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+			e3b0_val->mac_0_class_t_guarantied =
+			    PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+		} else if (cos0_pauseable) {
+			/* lossless + lossless */
+			e3b0_val->lb_guarantied =
+			    PFC_E3B0_2P_PAUSE_LB_GUART;
+			e3b0_val->mac_0_class_t_guarantied =
+			    PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+		} else {
+			/* lossy + lossy */
+			e3b0_val->lb_guarantied =
+			    PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+			e3b0_val->mac_0_class_t_guarantied =
+			    PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+		}
+	}
+}
+
+static int bnx2x_update_pfc_brb(struct link_params *params,
+				struct link_vars *vars,
+				struct bnx2x_nig_brb_pfc_port_params
+				*pfc_params)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_pfc_brb_th_val config_val = { {0} };
+	struct bnx2x_pfc_brb_threshold_val *reg_th_config =
+	    &config_val.pauseable_th;
+	struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
+	int set_pfc = params->feature_config_flags &
+		FEATURE_CONFIG_PFC_ENABLED;
+	int bnx2x_status = 0;
+	u8 port = params->port;
+
+	/* default - pause configuration */
+	reg_th_config = &config_val.pauseable_th;
+	bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
+	if (0 != bnx2x_status)
+		return bnx2x_status;
+
+	if (set_pfc && pfc_params) {
+		/* First COS */
+		if (!pfc_params->cos0_pauseable)
+			reg_th_config = &config_val.non_pauseable_th;
+	}
+	/*
+	 * The number of free blocks below which the pause signal to class 0
+	 * of MAC #n is asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1 :
+	       BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0,
+	       reg_th_config->pause_xoff);
+	/*
+	 * The number of free blocks above which the pause signal to class 0
+	 * of MAC #n is de-asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_PAUSE_0_XON_THRESHOLD_1 :
+	       BRB1_REG_PAUSE_0_XON_THRESHOLD_0, reg_th_config->pause_xon);
+	/*
+	 * The number of free blocks below which the full signal to class 0
+	 * of MAC #n is asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XOFF_THRESHOLD_1 :
+	       BRB1_REG_FULL_0_XOFF_THRESHOLD_0, reg_th_config->full_xoff);
+	/*
+	 * The number of free blocks above which the full signal to class 0
+	 * of MAC #n is de-asserted. n=0,1
+	 */
+	REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
+	       BRB1_REG_FULL_0_XON_THRESHOLD_0, reg_th_config->full_xon);
+
+	if (set_pfc && pfc_params) {
+		/* Second COS */
+		if (pfc_params->cos1_pauseable)
+			reg_th_config = &config_val.pauseable_th;
+		else
+			reg_th_config = &config_val.non_pauseable_th;
+		/*
+		 * The number of free blocks below which the pause signal to
+		 * class 1 of MAC #n is asserted. n=0,1
+		 */
+		REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+		       BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+		       reg_th_config->pause_xoff);
+		/*
+		 * The number of free blocks above which the pause signal to
+		 * class 1 of MAC #n is de-asserted. n=0,1
+		 */
+		REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+		       BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+		       reg_th_config->pause_xon);
+		/*
+		 * The number of free blocks below which the full signal to
+		 * class 1 of MAC #n is asserted. n=0,1
+		 */
+		REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+		       BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+		       reg_th_config->full_xoff);
+		/*
+		 * The number of free blocks above which the full signal to
+		 * class 1 of MAC #n is de-asserted. n=0,1
+		 */
+		REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+		       BRB1_REG_FULL_1_XON_THRESHOLD_0,
+		       reg_th_config->full_xon);
+
+		if (CHIP_IS_E3B0(bp)) {
+			/* Should be done by init tool */
+			/*
+			 * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
+			 * reset value is 944.
+			 */
+
+			/*
+			 * The hysteresis on the guarantied buffer space for
+			 * the LB port before signaling XON.
+			 */
+			REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
+
+			bnx2x_pfc_brb_get_e3b0_config_params(
+			    params,
+			    &e3b0_val,
+			    pfc_params->cos0_pauseable,
+			    pfc_params->cos1_pauseable);
+			/*
+			 * The number of free blocks below which the full
+			 * signal to the LB port is asserted.
+			 */
+			REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+			       e3b0_val.full_lb_xoff_th);
+			/*
+			 * The number of free blocks above which the full
+			 * signal to the LB port is de-asserted.
+			 */
+			REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+			       e3b0_val.full_lb_xon_threshold);
+			/* The number of blocks guarantied for the LB port */
+			REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+			       e3b0_val.lb_guarantied);
+
+			/*
+			 * The number of blocks guarantied for the MAC #n
+			 * port. n=0,1
+			 */
+			REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+			       2 * e3b0_val.mac_0_class_t_guarantied);
+			REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+			       2 * e3b0_val.mac_1_class_t_guarantied);
+			/*
+			 * The number of blocks guarantied for class #t in
+			 * MAC0. t=0,1
+			 */
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+			       e3b0_val.mac_0_class_t_guarantied);
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+			       e3b0_val.mac_0_class_t_guarantied);
+			/*
+			 * The hysteresis on the guarantied buffer space for
+			 * class #t in MAC0. t=0,1
+			 */
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+			       e3b0_val.mac_0_class_t_guarantied_hyst);
+			REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+			       e3b0_val.mac_0_class_t_guarantied_hyst);
+
+			/*
+			 * The number of blocks guarantied for class #t in
+			 * MAC1. t=0,1
+			 */
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+			       e3b0_val.mac_1_class_t_guarantied);
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+			       e3b0_val.mac_1_class_t_guarantied);
+			/*
+			 * The hysteresis on the guarantied buffer space for
+			 * class #t in MAC1. t=0,1
+			 */
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+			       e3b0_val.mac_1_class_t_guarantied_hyst);
+			REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+			       e3b0_val.mac_1_class_t_guarantied_hyst);
+		}
+	}
+
+	return bnx2x_status;
+}
+
+/******************************************************************************
+* Description:
+*	This function is needed because the NIG RX COS priority mask
+*	registers are not contiguous, so simple "base + offset" addressing
+*	(as with ARB_CREDIT_WEIGHT_0) is not suitable here.
+******************************************************************************/
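+/*
+ * priority_mask is a bitmap of packet priorities served by the given COS
+ * entry; for example a mask of 0x0C would steer priorities 2 and 3 to
+ * that COS (an illustration of the bitmap convention, not a recommended
+ * value).
+ */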
+int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp,
+				   u8 cos_entry,
+				   u32 priority_mask, u8 port)
+{
+	u32 nig_reg_rx_priority_mask_add = 0;
+
+	switch (cos_entry) {
+	case 0:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS0_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS0_PRIORITY_MASK;
+		break;
+	case 1:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS1_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS1_PRIORITY_MASK;
+		break;
+	case 2:
+		nig_reg_rx_priority_mask_add = (port) ?
+			NIG_REG_P1_RX_COS2_PRIORITY_MASK :
+			NIG_REG_P0_RX_COS2_PRIORITY_MASK;
+		break;
+	case 3:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS3_PRIORITY_MASK;
+		break;
+	case 4:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS4_PRIORITY_MASK;
+		break;
+	case 5:
+		if (port)
+			return -EINVAL;
+		nig_reg_rx_priority_mask_add = NIG_REG_P0_RX_COS5_PRIORITY_MASK;
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid cos entry %d\n", cos_entry);
+		return -EINVAL;
+	}
+
+	REG_WR(bp, nig_reg_rx_priority_mask_add, priority_mask);
+
+	return 0;
+}
+
+static void bnx2x_update_mng(struct link_params *params, u32 link_status)
+{
+	struct bnx2x *bp = params->bp;
+
+	REG_WR(bp, params->shmem_base +
+	       offsetof(struct shmem_region,
+			port_mb[params->port].link_status), link_status);
+}
+
+static void bnx2x_update_pfc_nig(struct link_params *params,
+		struct link_vars *vars,
+		struct bnx2x_nig_brb_pfc_port_params *nig_params)
+{
+	u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
+	u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+	u32 pkt_priority_to_cos = 0;
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+
+	int set_pfc = params->feature_config_flags &
+		FEATURE_CONFIG_PFC_ENABLED;
+	DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
+
+	/*
+	 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
+	 * MAC control frames (that are not pause packets)
+	 * will be forwarded to the XCM.
+	 */
+	xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+			  NIG_REG_LLH0_XCM_MASK);
+	/*
+	 * nig params will override non PFC params, since it's possible to
+	 * do transition from PFC to SAFC
+	 */
+	if (set_pfc) {
+		pause_enable = 0;
+		llfc_out_en = 0;
+		llfc_enable = 0;
+		if (CHIP_IS_E3(bp))
+			ppp_enable = 0;
+		else
+			ppp_enable = 1;
+		xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+				     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+		xcm0_out_en = 0;
+		p0_hwpfc_enable = 1;
+	} else {
+		if (nig_params) {
+			llfc_out_en = nig_params->llfc_out_en;
+			llfc_enable = nig_params->llfc_enable;
+			pause_enable = nig_params->pause_enable;
+		} else {
+			/* default non-PFC mode - PAUSE */
+			pause_enable = 1;
+		}
+
+		xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
+			NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
+		xcm0_out_en = 1;
+	}
+
+	if (CHIP_IS_E3(bp))
+		REG_WR(bp, port ? NIG_REG_BRB1_PAUSE_IN_EN :
+		       NIG_REG_BRB0_PAUSE_IN_EN, pause_enable);
+	REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
+	       NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
+	REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
+	       NIG_REG_LLFC_ENABLE_0, llfc_enable);
+	REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
+	       NIG_REG_PAUSE_ENABLE_0, pause_enable);
+
+	REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
+	       NIG_REG_PPP_ENABLE_0, ppp_enable);
+
+	REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
+	       NIG_REG_LLH0_XCM_MASK, xcm_mask);
+
+	REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+
+	/* output enable for RX_XCM # IF */
+	REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+
+	/* HW PFC TX enable */
+	REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+
+	if (nig_params) {
+		u8 i = 0;
+		pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
+
+		for (i = 0; i < nig_params->num_of_rx_cos_priority_mask; i++)
+			bnx2x_pfc_nig_rx_priority_mask(bp, i,
+				nig_params->rx_cos_priority_mask[i], port);
+
+		REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
+		       NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
+		       nig_params->llfc_high_priority_classes);
+
+		REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
+		       NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
+		       nig_params->llfc_low_priority_classes);
+	}
+	REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
+	       NIG_REG_P0_PKT_PRIORITY_TO_COS,
+	       pkt_priority_to_cos);
+}
+
+int bnx2x_update_pfc(struct link_params *params,
+		      struct link_vars *vars,
+		      struct bnx2x_nig_brb_pfc_port_params *pfc_params)
+{
+	/*
+	 * PFC and pause are orthogonal: when PFC is enabled, pause is
+	 * disabled, and when PFC is disabled, pause is set according to
+	 * the pause result.
+	 */
+	u32 val;
+	struct bnx2x *bp = params->bp;
+	int bnx2x_status = 0;
+	u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
+
+	if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
+		vars->link_status |= LINK_STATUS_PFC_ENABLED;
+	else
+		vars->link_status &= ~LINK_STATUS_PFC_ENABLED;
+
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* update NIG params */
+	bnx2x_update_pfc_nig(params, vars, pfc_params);
+
+	/* update BRB params */
+	bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
+	if (0 != bnx2x_status)
+		return bnx2x_status;
+
+	if (!vars->link_up)
+		return bnx2x_status;
+
+	DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
+	if (CHIP_IS_E3(bp))
+		bnx2x_update_pfc_xmac(params, vars, 0);
+	else {
+		val = REG_RD(bp, MISC_REG_RESET_REG_2);
+		if ((val &
+		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
+		    == 0) {
+			DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
+			bnx2x_emac_enable(params, vars, 0);
+			return bnx2x_status;
+		}
+
+		if (CHIP_IS_E2(bp))
+			bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
+		else
+			bnx2x_update_pfc_bmac1(params, vars);
+
+		val = 0;
+		if ((params->feature_config_flags &
+		     FEATURE_CONFIG_PFC_ENABLED) ||
+		    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+			val = 1;
+		REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
+	}
+	return bnx2x_status;
+}
+
+static int bnx2x_bmac1_enable(struct link_params *params,
+			      struct link_vars *vars,
+			      u8 is_lb)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+			       NIG_REG_INGRESS_BMAC0_MEM;
+	u32 wb_data[2];
+	u32 val;
+
+	DP(NETIF_MSG_LINK, "Enabling BigMAC1\n");
+
+	/* XGXS control */
+	wb_data[0] = 0x3c;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
+		    wb_data, 2);
+
+	/* tx MAC SA */
+	wb_data[0] = ((params->mac_addr[2] << 24) |
+		       (params->mac_addr[3] << 16) |
+		       (params->mac_addr[4] << 8) |
+			params->mac_addr[5]);
+	wb_data[1] = ((params->mac_addr[0] << 8) |
+			params->mac_addr[1]);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2);
+
+	/* mac control */
+	val = 0x3;
+	if (is_lb) {
+		val |= 0x4;
+		DP(NETIF_MSG_LINK, "enable bmac loopback\n");
+	}
+	wb_data[0] = val;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2);
+
+	/* set rx mtu */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2);
+
+	bnx2x_update_pfc_bmac1(params, vars);
+
+	/* set tx mtu */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2);
+
+	/* set cnt max size */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+
+	/* configure safc */
+	wb_data[0] = 0x1000200;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
+		    wb_data, 2);
+
+	return 0;
+}
+
+static int bnx2x_bmac2_enable(struct link_params *params,
+			      struct link_vars *vars,
+			      u8 is_lb)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+			       NIG_REG_INGRESS_BMAC0_MEM;
+	u32 wb_data[2];
+
+	DP(NETIF_MSG_LINK, "Enabling BigMAC2\n");
+
+	wb_data[0] = 0;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
+	udelay(30);
+
+	/* XGXS control: Reset phy HW, MDIO registers, PHY PLL and BMAC */
+	wb_data[0] = 0x3c;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_XGXS_CONTROL,
+		    wb_data, 2);
+
+	udelay(30);
+
+	/* tx MAC SA */
+	wb_data[0] = ((params->mac_addr[2] << 24) |
+		       (params->mac_addr[3] << 16) |
+		       (params->mac_addr[4] << 8) |
+			params->mac_addr[5]);
+	wb_data[1] = ((params->mac_addr[0] << 8) |
+			params->mac_addr[1]);
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_SOURCE_ADDR,
+		    wb_data, 2);
+
+	udelay(30);
+
+	/* Configure SAFC */
+	wb_data[0] = 0x1000200;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS,
+		    wb_data, 2);
+	udelay(30);
+
+	/* set rx mtu */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2);
+	udelay(30);
+
+	/* set tx mtu */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2);
+	udelay(30);
+	/* set cnt max size */
+	wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2;
+	wb_data[1] = 0;
+	REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2);
+	udelay(30);
+	bnx2x_update_pfc_bmac2(params, vars, is_lb);
+
+	return 0;
+}
+
+static int bnx2x_bmac_enable(struct link_params *params,
+			     struct link_vars *vars,
+			     u8 is_lb)
+{
+	int rc = 0;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 val;
+	/* reset and unreset the BigMac */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	msleep(1);
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* enable access for bmac registers */
+	REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
+
+	/* Enable BMAC according to BMAC type*/
+	if (CHIP_IS_E2(bp))
+		rc = bnx2x_bmac2_enable(params, vars, is_lb);
+	else
+		rc = bnx2x_bmac1_enable(params, vars, is_lb);
+	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
+	REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
+	val = 0;
+	if ((params->feature_config_flags &
+	      FEATURE_CONFIG_PFC_ENABLED) ||
+	    (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
+		val = 1;
+	REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
+	REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x1);
+	REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x1);
+
+	vars->mac_type = MAC_TYPE_BMAC;
+	return rc;
+}
+
+static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port)
+{
+	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+			NIG_REG_INGRESS_BMAC0_MEM;
+	u32 wb_data[2];
+	u32 nig_bmac_enable = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4);
+
+	/* Only if the bmac is out of reset */
+	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+			(MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port) &&
+	    nig_bmac_enable) {
+
+		if (CHIP_IS_E2(bp)) {
+			/* Clear Rx Enable bit in BMAC_CONTROL register */
+			REG_RD_DMAE(bp, bmac_addr +
+				    BIGMAC2_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
+			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+			REG_WR_DMAE(bp, bmac_addr +
+				    BIGMAC2_REGISTER_BMAC_CONTROL,
+				    wb_data, 2);
+		} else {
+			/* Clear Rx Enable bit in BMAC_CONTROL register */
+			REG_RD_DMAE(bp, bmac_addr +
+					BIGMAC_REGISTER_BMAC_CONTROL,
+					wb_data, 2);
+			wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
+			REG_WR_DMAE(bp, bmac_addr +
+					BIGMAC_REGISTER_BMAC_CONTROL,
+					wb_data, 2);
+		}
+		msleep(1);
+	}
+}
+
+static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl,
+			    u32 line_speed)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 init_crd, crd;
+	u32 count = 1000;
+
+	/* disable port */
+	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
+
+	/* wait for init credit */
+	init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
+	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+	DP(NETIF_MSG_LINK, "init_crd 0x%x  crd 0x%x\n", init_crd, crd);
+
+	while ((init_crd != crd) && count) {
+		msleep(5);
+
+		crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+		count--;
+	}
+	crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
+	if (init_crd != crd) {
+		DP(NETIF_MSG_LINK, "BUG! init_crd 0x%x != crd 0x%x\n",
+			  init_crd, crd);
+		return -EINVAL;
+	}
+
+	if (flow_ctrl & BNX2X_FLOW_CTRL_RX ||
+	    line_speed == SPEED_10 ||
+	    line_speed == SPEED_100 ||
+	    line_speed == SPEED_1000 ||
+	    line_speed == SPEED_2500) {
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1);
+		/* update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
+		/* update init credit */
+		init_crd = 778;		/* (800-18-4) */
+
+	} else {
+		u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE +
+			      ETH_OVREHEAD)/16;
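+		/*
+		 * The ARB threshold register appears to count in 16-byte
+		 * units, hence the division by 16 (an inference from the
+		 * code, not from documentation).
+		 */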
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+		/* update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
+		/* update init credit */
+		switch (line_speed) {
+		case SPEED_10000:
+			init_crd = thresh + 553 - 22;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+				  line_speed);
+			return -EINVAL;
+		}
+	}
+	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
+	DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
+		 line_speed, init_crd);
+
+	/* probe the credit changes */
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
+	msleep(5);
+	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
+
+	/* enable port */
+	REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
+	return 0;
+}
+
+/**
+ * bnx2x_get_emac_base - retrieve the EMAC base address
+ *
+ * @bp:			driver handle
+ * @mdc_mdio_access:	access type
+ * @port:		port id
+ *
+ * This function selects the MDC/MDIO access (through emac0 or
+ * emac1) depending on the mdc_mdio_access, the port, and whether the
+ * ports are swapped. Each phy has a default access mode, which can
+ * also be overridden by nvram configuration. This parameter - whether
+ * it is the default phy configuration or the nvram override
+ * configuration - is passed here as mdc_mdio_access and selects the
+ * emac_base for the CL45 read/write operations.
+ */
+static u32 bnx2x_get_emac_base(struct bnx2x *bp,
+			       u32 mdc_mdio_access, u8 port)
+{
+	u32 emac_base = 0;
+	switch (mdc_mdio_access) {
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_PHY_TYPE:
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC0:
+		if (REG_RD(bp, NIG_REG_PORT_SWAP))
+			emac_base = GRCBASE_EMAC1;
+		else
+			emac_base = GRCBASE_EMAC0;
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1:
+		if (REG_RD(bp, NIG_REG_PORT_SWAP))
+			emac_base = GRCBASE_EMAC0;
+		else
+			emac_base = GRCBASE_EMAC1;
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH:
+		emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		break;
+	case SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED:
+		emac_base = (port) ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
+		break;
+	default:
+		break;
+	}
+	return emac_base;
+}
+
+/******************************************************************/
+/*			CL22 access functions			  */
+/******************************************************************/
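+/*
+ * As composed below, the MDIO_COMM register packs the PHY address into
+ * bits [25:21], the register (CL22) or device address (CL45) into bits
+ * [20:16] and the data into bits [15:0], OR'ed with a command code and
+ * the START_BUSY flag.
+ */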
+static int bnx2x_cl22_write(struct bnx2x *bp,
+			    struct bnx2x_phy *phy,
+			    u16 reg, u16 val)
+{
+	u32 tmp, mode;
+	u8 i;
+	int rc = 0;
+	/* Switch to CL22 */
+	mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+	       mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+	/* Compose address, data and the CL22 write command */
+	tmp = ((phy->addr << 21) | (reg << 16) | val |
+	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "write phy register failed\n");
+		rc = -EFAULT;
+	}
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+	return rc;
+}
+
+static int bnx2x_cl22_read(struct bnx2x *bp,
+			   struct bnx2x_phy *phy,
+			   u16 reg, u16 *ret_val)
+{
+	u32 val, mode;
+	u16 i;
+	int rc = 0;
+
+	/* Switch to CL22 */
+	mode = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE,
+	       mode & ~EMAC_MDIO_MODE_CLAUSE_45);
+
+	/* Compose address and the CL22 read command */
+	val = ((phy->addr << 21) | (reg << 16) |
+	       EMAC_MDIO_COMM_COMMAND_READ_22 |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+			*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+			udelay(5);
+			break;
+		}
+	}
+	if (val & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "read phy register failed\n");
+
+		*ret_val = 0;
+		rc = -EFAULT;
+	}
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode);
+	return rc;
+}
+
+/******************************************************************/
+/*			CL45 access functions			  */
+/******************************************************************/
+static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy,
+			   u8 devad, u16 reg, u16 *ret_val)
+{
+	u32 val;
+	u16 i;
+	int rc = 0;
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			      EMAC_MDIO_STATUS_10MB);
+	/* address */
+	val = ((phy->addr << 21) | (devad << 16) | reg |
+	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		val = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+	if (val & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "read phy register failed\n");
+		netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+		*ret_val = 0;
+		rc = -EFAULT;
+	} else {
+		/* data */
+		val = ((phy->addr << 21) | (devad << 16) |
+		       EMAC_MDIO_COMM_COMMAND_READ_45 |
+		       EMAC_MDIO_COMM_START_BUSY);
+		REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);
+
+		for (i = 0; i < 50; i++) {
+			udelay(10);
+
+			val = REG_RD(bp, phy->mdio_ctrl +
+				     EMAC_REG_EMAC_MDIO_COMM);
+			if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
+				*ret_val = (u16)(val & EMAC_MDIO_COMM_DATA);
+				break;
+			}
+		}
+		if (val & EMAC_MDIO_COMM_START_BUSY) {
+			DP(NETIF_MSG_LINK, "read phy register failed\n");
+			netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+			*ret_val = 0;
+			rc = -EFAULT;
+		}
+	}
+	/* Work around for E3 A0 */
+	if (phy->flags & FLAGS_MDC_MDIO_WA) {
+		phy->flags ^= FLAGS_DUMMY_READ;
+		if (phy->flags & FLAGS_DUMMY_READ) {
+			u16 temp_val;
+			bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+		}
+	}
+
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			       EMAC_MDIO_STATUS_10MB);
+	return rc;
+}
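+
+/*
+ * Note: CL45 access is a two-cycle protocol, which the code above
+ * mirrors: an address cycle (COMMAND_ADDRESS latches devad/reg)
+ * followed by a data cycle (COMMAND_READ_45), each polled on
+ * START_BUSY for up to 50 * 10us before -EFAULT is returned.
+ */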
+
+static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+			    u8 devad, u16 reg, u16 val)
+{
+	u32 tmp;
+	u8 i;
+	int rc = 0;
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			      EMAC_MDIO_STATUS_10MB);
+
+	/* address */
+
+	tmp = ((phy->addr << 21) | (devad << 16) | reg |
+	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
+	       EMAC_MDIO_COMM_START_BUSY);
+	REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+	for (i = 0; i < 50; i++) {
+		udelay(10);
+
+		tmp = REG_RD(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
+		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+			udelay(5);
+			break;
+		}
+	}
+	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+		DP(NETIF_MSG_LINK, "write phy register failed\n");
+		netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+		rc = -EFAULT;
+
+	} else {
+		/* data */
+		tmp = ((phy->addr << 21) | (devad << 16) | val |
+		       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
+		       EMAC_MDIO_COMM_START_BUSY);
+		REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);
+
+		for (i = 0; i < 50; i++) {
+			udelay(10);
+
+			tmp = REG_RD(bp, phy->mdio_ctrl +
+				     EMAC_REG_EMAC_MDIO_COMM);
+			if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
+				udelay(5);
+				break;
+			}
+		}
+		if (tmp & EMAC_MDIO_COMM_START_BUSY) {
+			DP(NETIF_MSG_LINK, "write phy register failed\n");
+			netdev_err(bp->dev, "MDC/MDIO access timeout\n");
+			rc = -EFAULT;
+		}
+	}
+	/* Work around for E3 A0 */
+	if (phy->flags & FLAGS_MDC_MDIO_WA) {
+		phy->flags ^= FLAGS_DUMMY_READ;
+		if (phy->flags & FLAGS_DUMMY_READ) {
+			u16 temp_val;
+			bnx2x_cl45_read(bp, phy, devad, 0xf, &temp_val);
+		}
+	}
+	if (phy->flags & FLAGS_MDC_MDIO_WA_B0)
+		bnx2x_bits_dis(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS,
+			       EMAC_MDIO_STATUS_10MB);
+	return rc;
+}
+
+/******************************************************************/
+/*			BSC access functions from E3	          */
+/******************************************************************/
+static void bnx2x_bsc_module_sel(struct link_params *params)
+{
+	int idx;
+	u32 board_cfg, sfp_ctrl;
+	u32 i2c_pins[I2C_SWITCH_WIDTH], i2c_val[I2C_SWITCH_WIDTH];
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	/* Read I2C output PINs */
+	board_cfg = REG_RD(bp, params->shmem_base +
+			   offsetof(struct shmem_region,
+				    dev_info.shared_hw_config.board));
+	i2c_pins[I2C_BSC0] = board_cfg & SHARED_HW_CFG_E3_I2C_MUX0_MASK;
+	i2c_pins[I2C_BSC1] = (board_cfg & SHARED_HW_CFG_E3_I2C_MUX1_MASK) >>
+			SHARED_HW_CFG_E3_I2C_MUX1_SHIFT;
+
+	/* Read I2C output value */
+	sfp_ctrl = REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].e3_cmn_pin_cfg));
+	i2c_val[I2C_BSC0] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX0_MASK) > 0;
+	i2c_val[I2C_BSC1] = (sfp_ctrl & PORT_HW_CFG_E3_I2C_MUX1_MASK) > 0;
+	DP(NETIF_MSG_LINK, "Setting BSC switch\n");
+	for (idx = 0; idx < I2C_SWITCH_WIDTH; idx++)
+		bnx2x_set_cfg_pin(bp, i2c_pins[idx], i2c_val[idx]);
+}
+
+static int bnx2x_bsc_read(struct link_params *params,
+			  struct bnx2x_phy *phy,
+			  u8 sl_devid,
+			  u16 sl_addr,
+			  u8 lc_addr,
+			  u8 xfer_cnt,
+			  u32 *data_array)
+{
+	u32 val, i;
+	int rc = 0;
+	struct bnx2x *bp = params->bp;
+
+	if ((sl_devid != 0xa0) && (sl_devid != 0xa2)) {
+		DP(NETIF_MSG_LINK, "invalid sl_devid 0x%x\n", sl_devid);
+		return -EINVAL;
+	}
+
+	if (xfer_cnt > 16) {
+		DP(NETIF_MSG_LINK, "invalid xfer_cnt %d. Max is 16 bytes\n",
+					xfer_cnt);
+		return -EINVAL;
+	}
+	bnx2x_bsc_module_sel(params);
+
+	xfer_cnt = 16 - lc_addr;
+
+	/* enable the engine */
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	val |= MCPR_IMC_COMMAND_ENABLE;
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* program slave device ID */
+	val = (sl_devid << 16) | sl_addr;
+	REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val);
+
+	/* Start xfer with 0 bytes to update the address pointer */
+	val = (MCPR_IMC_COMMAND_ENABLE) |
+	      (MCPR_IMC_COMMAND_WRITE_OP <<
+		MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+		(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0);
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* poll for completion */
+	i = 0;
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+		udelay(10);
+		val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+		if (i++ > 1000) {
+			DP(NETIF_MSG_LINK, "wr 0 bytes timed out after %d tries\n",
+								i);
+			rc = -EFAULT;
+			break;
+		}
+	}
+	if (rc == -EFAULT)
+		return rc;
+
+	/* start xfer with read op */
+	val = (MCPR_IMC_COMMAND_ENABLE) |
+		(MCPR_IMC_COMMAND_READ_OP <<
+		MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+		(lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) |
+		  (xfer_cnt);
+	REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val);
+
+	/* poll for completion */
+	i = 0;
+	val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+	while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) {
+		udelay(10);
+		val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND);
+		if (i++ > 1000) {
+			DP(NETIF_MSG_LINK, "rd op timed out after %d tries\n", i);
+			rc = -EFAULT;
+			break;
+		}
+	}
+	if (rc == -EFAULT)
+		return rc;
+
+	for (i = (lc_addr >> 2); i < 4; i++) {
+		data_array[i] = REG_RD(bp, (MCP_REG_MCPR_IMC_DATAREG0 + i*4));
+#ifdef __BIG_ENDIAN
+		data_array[i] = ((data_array[i] & 0x000000ff) << 24) |
+				((data_array[i] & 0x0000ff00) << 8) |
+				((data_array[i] & 0x00ff0000) >> 8) |
+				((data_array[i] & 0xff000000) >> 24);
+#endif
+	}
+	return rc;
+}
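+
+/*
+ * Example of the big-endian fixup above (illustrative value only): a
+ * DATAREG word read back as 0xAABBCCDD is byte-reversed to 0xDDCCBBAA,
+ * so callers see the same byte order on both endiannesses. Note also
+ * that xfer_cnt is recomputed as 16 - lc_addr above, so the engine
+ * always reads through the end of the 16-byte window.
+ */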
+
+static void bnx2x_cl45_read_or_write(struct bnx2x *bp, struct bnx2x_phy *phy,
+				     u8 devad, u16 reg, u16 or_val)
+{
+	u16 val;
+	bnx2x_cl45_read(bp, phy, devad, reg, &val);
+	bnx2x_cl45_write(bp, phy, devad, reg, val | or_val);
+}
+
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+		   u8 devad, u16 reg, u16 *ret_val)
+{
+	u8 phy_index;
+	/*
+	 * Probe for the phy according to the given phy_addr, and execute
+	 * the read request on it
+	 */
+	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+		if (params->phy[phy_index].addr == phy_addr) {
+			return bnx2x_cl45_read(params->bp,
+					       &params->phy[phy_index], devad,
+					       reg, ret_val);
+		}
+	}
+	return -EINVAL;
+}
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+		    u8 devad, u16 reg, u16 val)
+{
+	u8 phy_index;
+	/*
+	 * Probe for the phy according to the given phy_addr, and execute
+	 * the write request on it
+	 */
+	for (phy_index = 0; phy_index < params->num_phys; phy_index++) {
+		if (params->phy[phy_index].addr == phy_addr) {
+			return bnx2x_cl45_write(params->bp,
+						&params->phy[phy_index], devad,
+						reg, val);
+		}
+	}
+	return -EINVAL;
+}
+static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	u8 lane = 0;
+	struct bnx2x *bp = params->bp;
+	u32 path_swap, path_swap_ovr;
+	u8 path, port;
+
+	path = BP_PATH(bp);
+	port = params->port;
+
+	if (bnx2x_is_4_port_mode(bp)) {
+		u32 port_swap, port_swap_ovr;
+
+		/* Figure out path swap value */
+		path_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP_OVWR);
+		if (path_swap_ovr & 0x1)
+			path_swap = (path_swap_ovr & 0x2);
+		else
+			path_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PATH_SWAP);
+
+		if (path_swap)
+			path = path ^ 1;
+
+		/* Figure out port swap value */
+		port_swap_ovr = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP_OVWR);
+		if (port_swap_ovr & 0x1)
+			port_swap = (port_swap_ovr & 0x2);
+		else
+			port_swap = REG_RD(bp, MISC_REG_FOUR_PORT_PORT_SWAP);
+
+		if (port_swap)
+			port = port ^ 1;
+
+		lane = (port<<1) + path;
+	} else { /* two port mode - no port swap */
+
+		/* Figure out path swap value */
+		path_swap_ovr =
+			REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP_OVWR);
+		if (path_swap_ovr & 0x1) {
+			path_swap = (path_swap_ovr & 0x2);
+		} else {
+			path_swap =
+				REG_RD(bp, MISC_REG_TWO_PORT_PATH_SWAP);
+		}
+		if (path_swap)
+			path = path ^ 1;
+
+		lane = path << 1;
+	}
+	return lane;
+}
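+
+/*
+ * Worked example of the lane mapping above (illustrative): in four-port
+ * mode, port 1 on path 0 (after any swaps) gives lane = (1 << 1) + 0 = 2;
+ * in two-port mode, path 1 gives lane = 1 << 1 = 2.
+ */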
+
+static void bnx2x_set_aer_mmd(struct link_params *params,
+			      struct bnx2x_phy *phy)
+{
+	u32 ser_lane;
+	u16 offset, aer_val;
+	struct bnx2x *bp = params->bp;
+	ser_lane = ((params->lane_config &
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+	offset = (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) ?
+		(phy->addr + ser_lane) : 0;
+
+	if (USES_WARPCORE(bp)) {
+		aer_val = bnx2x_get_warpcore_lane(phy, params);
+		/*
+		 * In Dual-lane mode, two lanes are joined together,
+		 * so in order to configure them, the AER broadcast method is
+		 * used here.
+		 * 0x200 is the broadcast address for lanes 0,1
+		 * 0x201 is the broadcast address for lanes 2,3
+		 */
+		if (phy->flags & FLAGS_WC_DUAL_MODE)
+			aer_val = (aer_val >> 1) | 0x200;
+	} else if (CHIP_IS_E2(bp))
+		aer_val = 0x3800 + offset - 1;
+	else
+		aer_val = 0x3800 + offset;
+	DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, aer_val);
+}
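+
+/*
+ * Example of the dual-lane AER encoding above (illustrative): a warpcore
+ * lane value of 3 becomes (3 >> 1) | 0x200 = 0x201, i.e. the broadcast
+ * address covering lanes 2,3.
+ */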
+
+/******************************************************************/
+/*			Internal phy section			  */
+/******************************************************************/
+
+static void bnx2x_set_serdes_access(struct bnx2x *bp, u8 port)
+{
+	u32 emac_base = (port) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+
+	/* Set Clause 22 */
+	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 1);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245f8000);
+	udelay(500);
+	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM, 0x245d000f);
+	udelay(500);
+	/* Set Clause 45 */
+	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_ST + port*0x10, 0);
+}
+
+static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port)
+{
+	u32 val;
+
+	DP(NETIF_MSG_LINK, "bnx2x_serdes_deassert\n");
+
+	val = SERDES_RESET_BITS << (port*16);
+
+	/* reset and unreset the SerDes/XGXS */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+	udelay(500);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+	bnx2x_set_serdes_access(bp, port);
+
+	REG_WR(bp, NIG_REG_SERDES0_CTRL_MD_DEVAD + port*0x10,
+	       DEFAULT_PHY_DEV_ADDR);
+}
+
+static void bnx2x_xgxs_deassert(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port;
+	u32 val;
+	DP(NETIF_MSG_LINK, "bnx2x_xgxs_deassert\n");
+	port = params->port;
+
+	val = XGXS_RESET_BITS << (port*16);
+
+	/* reset and unreset the SerDes/XGXS */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
+	udelay(500);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
+
+	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_ST + port*0x18, 0);
+	REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+	       params->phy[INT_PHY].def_md_devad);
+}
+
+static void bnx2x_calc_ieee_aneg_adv(struct bnx2x_phy *phy,
+				     struct link_params *params, u16 *ieee_fc)
+{
+	struct bnx2x *bp = params->bp;
+	*ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
+	/*
+	 * Resolve pause mode and advertisement. Please refer to Table
+	 * 28B-3 of the 802.3ab-1999 spec.
+	 */
+
+	switch (phy->req_flow_ctrl) {
+	case BNX2X_FLOW_CTRL_AUTO:
+		if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH)
+			*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		else
+			*ieee_fc |=
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+		break;
+
+	case BNX2X_FLOW_CTRL_TX:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+		break;
+
+	case BNX2X_FLOW_CTRL_RX:
+	case BNX2X_FLOW_CTRL_BOTH:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+		break;
+
+	case BNX2X_FLOW_CTRL_NONE:
+	default:
+		*ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
+		break;
+	}
+	DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
+}
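+
+/*
+ * Example resolutions of the switch above (illustrative): AUTO with
+ * req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH advertises PAUSE_BOTH, while
+ * a TX-only request advertises only PAUSE_ASYMMETRIC, per Table 28B-3.
+ */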
+
+static void set_phy_vars(struct link_params *params,
+			 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 actual_phy_idx, phy_index, link_cfg_idx;
+	u8 phy_config_swapped = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++) {
+		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
+		}
+		params->phy[actual_phy_idx].req_flow_ctrl =
+			params->req_flow_ctrl[link_cfg_idx];
+
+		params->phy[actual_phy_idx].req_line_speed =
+			params->req_line_speed[link_cfg_idx];
+
+		params->phy[actual_phy_idx].speed_cap_mask =
+			params->speed_cap_mask[link_cfg_idx];
+
+		params->phy[actual_phy_idx].req_duplex =
+			params->req_duplex[link_cfg_idx];
+
+		if (params->req_line_speed[link_cfg_idx] ==
+		    SPEED_AUTO_NEG)
+			vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+
+		DP(NETIF_MSG_LINK, "req_flow_ctrl %x, req_line_speed %x,"
+			   " speed_cap_mask %x\n",
+			   params->phy[actual_phy_idx].req_flow_ctrl,
+			   params->phy[actual_phy_idx].req_line_speed,
+			   params->phy[actual_phy_idx].speed_cap_mask);
+	}
+}
+
+static void bnx2x_ext_phy_set_pause(struct link_params *params,
+				    struct bnx2x_phy *phy,
+				    struct link_vars *vars)
+{
+	u16 val;
+	struct bnx2x *bp = params->bp;
+	/* Read-modify-write the pause advertisement */
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val);
+
+	val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH;
+
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+		val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+		val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+	}
+	DP(NETIF_MSG_LINK, "Ext phy AN advertise 0x%x\n", val);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val);
+}
+
+static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
+{						/*  LD	    LP	 */
+	switch (pause_result) {			/* ASYM P ASYM P */
+	case 0xb:				/*   1  0   1  1 */
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_TX;
+		break;
+
+	case 0xe:				/*   1  1   1  0 */
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		break;
+
+	case 0x5:				/*   0  1   0  1 */
+	case 0x7:				/*   0  1   1  1 */
+	case 0xd:				/*   1  1   0  1 */
+	case 0xf:				/*   1  1   1  1 */
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+		break;
+
+	default:
+		break;
+	}
+	if (pause_result & (1<<0))
+		vars->link_status |= LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE;
+	if (pause_result & (1<<1))
+		vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
+}
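+
+/*
+ * Example decode of pause_result (illustrative): 0xb is LD ASYM=1 P=0,
+ * LP ASYM=1 P=1, which the table above resolves to TX-only flow
+ * control; 0xf, where both sides advertise everything, resolves to BOTH.
+ */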
+
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 ld_pause;		/* local */
+	u16 lp_pause;		/* link partner */
+	u16 pause_result;
+	u8 ret = 0;
+	/* read twice */
+
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+	if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+		vars->flow_ctrl = phy->req_flow_ctrl;
+	else if (phy->req_line_speed != SPEED_AUTO_NEG)
+		vars->flow_ctrl = params->req_fc_auto_adv;
+	else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+		ret = 1;
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+			bnx2x_cl22_read(bp, phy,
+					0x4, &ld_pause);
+			bnx2x_cl22_read(bp, phy,
+					0x5, &lp_pause);
+		} else {
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+		}
+		pause_result = (ld_pause &
+				MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+		pause_result |= (lp_pause &
+				 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
+		   pause_result);
+		bnx2x_pause_resolve(vars, pause_result);
+	}
+	return ret;
+}
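+
+/*
+ * Note on the pause_result assembly above: the PAUSE/ASYM advertisement
+ * bits sit at 11:10 of the CL45 AN registers, so ">> 8" places the
+ * local bits at 3:2 and ">> 10" places the partner bits at 1:0,
+ * producing the nibble bnx2x_pause_resolve() expects.
+ */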
+/******************************************************************/
+/*			Warpcore section			  */
+/******************************************************************/
+/* The init_internal_warpcore should mirror the xgxs,
+ * i.e. reset the lane (if needed), set aer for the
+ * init configuration, and set/clear SGMII flag. Internal
+ * phy init is done purely in phy_init stage.
+ */
+static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
+					struct link_params *params,
+					struct link_vars *vars)
+{
+	u16 val16 = 0, lane, bam37 = 0;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n");
+	/* Check adding advertisement for 1G KX */
+	if (((vars->line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (vars->line_speed == SPEED_1000)) {
+		u16 sd_digital;
+		val16 |= (1<<5);
+
+		/* Enable CL37 1G Parallel Detect */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+				 (sd_digital | 0x1));
+
+		DP(NETIF_MSG_LINK, "Advertise 1G\n");
+	}
+	if (((vars->line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+	    (vars->line_speed ==  SPEED_10000)) {
+		/* Check adding advertisement for 10G KR */
+		val16 |= (1<<7);
+		/* Enable 10G Parallel Detect */
+		bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+				MDIO_WC_REG_PAR_DET_10G_CTRL, 1);
+
+		DP(NETIF_MSG_LINK, "Advertise 10G\n");
+	}
+
+	/* Set Transmit PMD settings */
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+		      MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+		     ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		      (0x06 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		      (0x09 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL,
+			 0x03f0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL,
+			 0x03f0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+			 0x383f);
+
+	/* Advertised speeds */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, val16);
+
+	/* Enable CL37 BAM */
+	if (REG_RD(bp, params->shmem_base +
+		   offsetof(struct shmem_region, dev_info.
+			    port_hw_config[params->port].default_cfg)) &
+	    PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1);
+		DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+	}
+
+	/* Advertise pause */
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+
+	/* Enable Autoneg */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
+
+	/* Over 1G - AN local device user page 1 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL3_UP1, 0x1f);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL5_MISC7, &val16);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100);
+}
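+
+/*
+ * Summary of the advertisement word built above, as implied by the
+ * code: bit 5 of AN_ADVERTISEMENT1 advertises 1G-KX and bit 7
+ * advertises 10G-KR, so val16 is 0xa0 when both speeds are enabled.
+ */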
+
+static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+
+	/* Disable Autoneg */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_PAR_DET_10G_CTRL, 0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL3_UP1, 0x1);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC7, 0xa);
+
+	/* Disable CL36 PCS Tx */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0);
+
+	/* Double Wide Single Data Rate @ pll rate */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF);
+
+	/* Leave cl72 training enable, needed for KR */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+		MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150,
+		0x2);
+
+	/* Leave CL72 enabled */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+			 &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL,
+			 val | 0x3800);
+
+	/* Set speed via PMA/PMD register */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0xB);
+
+	/* Enable encoded forced speed */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x30);
+
+	/* Turn on TX scramble payload only in the 64/66 scrambler */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX66_CONTROL, 0x9);
+
+	/* Turn on RX scramble payload only in the 64/66 scrambler */
+	bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_RX66_CONTROL, 0xF9);
+
+	/* Set and clear loopback to cause a reset of the 64/66 decoder */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0);
+}
+
+static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       u8 is_xfi)
+{
+	struct bnx2x *bp = params->bp;
+	u16 misc1_val, tap_val, tx_driver_val, lane, val;
+	/* Hold rxSeqStart */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000));
+
+	/* Hold tx_fifo_reset */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1));
+
+	/* Disable CL73 AN */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+
+	/* Disable 100FX Enable and Auto-Detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_FX100_CTRL1, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA));
+
+	/* Disable 100FX Idle detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_FX100_CTRL3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL3, (val | 0x0080));
+
+	/* Set Block address to Remote PHY & Clear forced_speed[5] */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL4_MISC3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, (val & 0xFF7F));
+
+	/* Turn off auto-detect & fiber mode */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			 (val & 0xFFEE));
+
+	/* Set filter_force_link, disable_false_link and parallel_detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			 ((val | 0x0006) & 0xFFFE));
+
+	/* Set XFI / SFI */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_MISC1, &misc1_val);
+
+	misc1_val &= ~(0x1f);
+
+	if (is_xfi) {
+		misc1_val |= 0x5;
+		tap_val = ((0x08 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			   (0x37 << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+		tx_driver_val =
+		      ((0x00 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		       (0x03 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+
+	} else {
+		misc1_val |= 0x9;
+		tap_val = ((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			   (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			   (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET));
+		tx_driver_val =
+		      ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		       (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET));
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, misc1_val);
+
+	/* Set Transmit PMD settings */
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP,
+			 tap_val | MDIO_WC_REG_TX_FIR_TAP_ENABLE);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+			 tx_driver_val);
+
+	/* Enable fiber mode, enable and invert sig_det */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd);
+
+	/* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL4_MISC3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080);
+
+	/* 10G XFI Full Duplex */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x100);
+
+	/* Release tx_fifo_reset */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, val & 0xFFFE);
+
+	/* Release rxSeqStart */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val & 0x7FFF));
+}
+
+static void bnx2x_warpcore_set_20G_KR2(struct bnx2x *bp,
+				       struct bnx2x_phy *phy)
+{
+	DP(NETIF_MSG_LINK, "KR2 is still not supported\n");
+}
+
+static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
+					 struct bnx2x_phy *phy,
+					 u16 lane)
+{
+	/* Rx0 anaRxControl1G */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX0_ANARXCONTROL1G, 0x90);
+
+	/* Rx2 anaRxControl1G */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX2_ANARXCONTROL1G, 0x90);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW0, 0xE070);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW1, 0xC0D0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW2, 0xA0B0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW3, 0x8090);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW0_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW1_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW2_MASK, 0xF0F0);
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_SCW3_MASK, 0xF0F0);
+
+	/* Serdes Digital Misc1 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6008);
+
+	/* Serdes Digital4 Misc3 */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, 0x8088);
+
+	/* Set Transmit PMD settings */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP,
+			((0x12 << MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET) |
+			 (0x2d << MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET) |
+			 (0x00 << MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET) |
+			 MDIO_WC_REG_TX_FIR_TAP_ENABLE));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+		      MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane,
+		     ((0x02 << MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET) |
+		      (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET) |
+		      (0x02 << MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET)));
+}
+
+static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
+					   struct link_params *params,
+					   u8 fiber_mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16, digctrl_kx1, digctrl_kx2;
+	u8 lane;
+
+	lane = bnx2x_get_warpcore_lane(phy, params);
+
+	/* Clear XFI clock comp in non-10G single lane mode. */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_RX66_CONTROL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
+
+	if (phy->req_line_speed == SPEED_AUTO_NEG) {
+		/* SGMII Autoneg */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				 MDIO_WC_REG_COMBO_IEEE0_MIICTRL,
+				 val16 | 0x1000);
+		DP(NETIF_MSG_LINK, "set SGMII AUTONEG\n");
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		val16 &= 0xcfbf;
+		switch (phy->req_line_speed) {
+		case SPEED_10:
+			break;
+		case SPEED_100:
+			val16 |= 0x2000;
+			break;
+		case SPEED_1000:
+			val16 |= 0x0040;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Speed not supported: 0x%x\n",
+			   phy->req_line_speed);
+			return;
+		}
+
+		if (phy->req_duplex == DUPLEX_FULL)
+			val16 |= 0x0100;
+
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16);
+
+		DP(NETIF_MSG_LINK, "set SGMII force speed %d\n",
+			       phy->req_line_speed);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		DP(NETIF_MSG_LINK, "  (readback) %x\n", val16);
+	}
+
+	/* SGMII Slave mode and disable signal detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &digctrl_kx1);
+	if (fiber_mode)
+		digctrl_kx1 = 1;
+	else
+		digctrl_kx1 &= 0xff4a;
+
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			digctrl_kx1);
+
+	/* Turn off parallel detect */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &digctrl_kx2);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			(digctrl_kx2 & ~(1<<2)));
+
+	/* Re-enable parallel detect */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2,
+			(digctrl_kx2 | (1<<2)));
+
+	/* Enable autodet */
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1,
+			(digctrl_kx1 | 0x10));
+}
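+
+/*
+ * Encoding of the forced-speed MIICTRL value above (illustrative):
+ * masking with 0xcfbf clears the autoneg-enable bit (12) and the IEEE
+ * speed-select bits (13, 6); 0x2000 then selects 100M, 0x0040 selects
+ * 1G and 0x0100 sets full duplex.
+ */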
+
+static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 reset)
+{
+	u16 val;
+	/* Take lane out of reset after configuration is finished */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_DIGITAL5_MISC6, &val);
+	if (reset)
+		val |= 0xC000;
+	else
+		val &= 0x3FFF;
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC6, val);
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL5_MISC6, &val);
+}
+
+/* Clear SFI/XFI link settings registers */
+static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      u16 lane)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16;
+
+	/* Set XFI clock comp as default. */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_RX66_CONTROL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13));
+
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL1, 0x014a);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_FX100_CTRL3, 0x0800);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_DIGITAL4_MISC3, 0x8008);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000);
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX_FIR_TAP, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140);
+	bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp,
+						u32 chip_id,
+						u32 shmem_base, u8 port,
+						u8 *gpio_num, u8 *gpio_port)
+{
+	u32 cfg_pin;
+	*gpio_num = 0;
+	*gpio_port = 0;
+	if (CHIP_IS_E3(bp)) {
+		cfg_pin = (REG_RD(bp, shmem_base +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+				PORT_HW_CFG_E3_MOD_ABS_MASK) >>
+				PORT_HW_CFG_E3_MOD_ABS_SHIFT;
+
+		/*
+		 * Should not happen. This function is called upon an
+		 * interrupt triggered by a GPIO (since EPIO can only
+		 * generate interrupts to the MCP). So if this function
+		 * was called and none of the GPIOs was set, something
+		 * has gone badly wrong.
+		 */
+		if ((cfg_pin < PIN_CFG_GPIO0_P0) ||
+		    (cfg_pin > PIN_CFG_GPIO3_P1)) {
+			DP(NETIF_MSG_LINK, "ERROR: Invalid cfg pin %x for "
+					   "module detect indication\n",
+				       cfg_pin);
+			return -EINVAL;
+		}
+
+		*gpio_num = (cfg_pin - PIN_CFG_GPIO0_P0) & 0x3;
+		*gpio_port = (cfg_pin - PIN_CFG_GPIO0_P0) >> 2;
+	} else {
+		*gpio_num = MISC_REGISTERS_GPIO_3;
+		*gpio_port = port;
+	}
+	DP(NETIF_MSG_LINK, "MOD_ABS int GPIO%d_P%d\n", *gpio_num, *gpio_port);
+	return 0;
+}
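+
+/*
+ * Example of the E3 pin decode above (illustrative, assuming the
+ * PIN_CFG_GPIO values run contiguously from GPIO0_P0 to GPIO3_P1):
+ * cfg_pin == PIN_CFG_GPIO0_P0 + 6 decodes to gpio_num 2 on gpio_port 1.
+ */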
+
+static int bnx2x_is_sfp_module_plugged(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_num, gpio_port;
+	u32 gpio_val;
+	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id,
+				      params->shmem_base, params->port,
+				      &gpio_num, &gpio_port) != 0)
+		return 0;
+	gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* A low pin value indicates that the module is plugged in */
+	return (gpio_val == 0);
+}
+
+static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 serdes_net_if;
+	u8 fiber_mode;
+	u16 lane = bnx2x_get_warpcore_lane(phy, params);
+	serdes_net_if = (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_hw_config[params->port].default_cfg)) &
+			 PORT_HW_CFG_NET_SERDES_IF_MASK);
+	DP(NETIF_MSG_LINK, "Begin Warpcore init, link_speed %d, "
+			   "serdes_net_if = 0x%x\n",
+		       vars->line_speed, serdes_net_if);
+	bnx2x_set_aer_mmd(params, phy);
+
+	vars->phy_flags |= PHY_XGXS_FLAG;
+	if ((serdes_net_if == PORT_HW_CFG_NET_SERDES_IF_SGMII) ||
+	    (phy->req_line_speed &&
+	     ((phy->req_line_speed == SPEED_100) ||
+	      (phy->req_line_speed == SPEED_10)))) {
+		vars->phy_flags |= PHY_SGMII_FLAG;
+		DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
+		bnx2x_warpcore_clear_regs(phy, params, lane);
+		bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+	} else {
+		switch (serdes_net_if) {
+		case PORT_HW_CFG_NET_SERDES_IF_KR:
+			/* Enable KR Auto Neg */
+			if (params->loopback_mode == LOOPBACK_NONE)
+				bnx2x_warpcore_enable_AN_KR(phy, params, vars);
+			else {
+				DP(NETIF_MSG_LINK, "Setting KR 10G-Force\n");
+				bnx2x_warpcore_set_10G_KR(phy, params, vars);
+			}
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			bnx2x_warpcore_clear_regs(phy, params, lane);
+			if (vars->line_speed == SPEED_10000) {
+				DP(NETIF_MSG_LINK, "Setting 10G XFI\n");
+				bnx2x_warpcore_set_10G_XFI(phy, params, 1);
+			} else {
+				if (SINGLE_MEDIA_DIRECT(params)) {
+					DP(NETIF_MSG_LINK, "1G Fiber\n");
+					fiber_mode = 1;
+				} else {
+					DP(NETIF_MSG_LINK, "10/100/1G SGMII\n");
+					fiber_mode = 0;
+				}
+				bnx2x_warpcore_set_sgmii_speed(phy,
+								params,
+								fiber_mode);
+			}
+
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_SFI:
+
+			bnx2x_warpcore_clear_regs(phy, params, lane);
+			if (vars->line_speed == SPEED_10000) {
+				DP(NETIF_MSG_LINK, "Setting 10G SFI\n");
+				bnx2x_warpcore_set_10G_XFI(phy, params, 0);
+			} else if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
+				bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+			}
+			/* Issue Module detection */
+			if (bnx2x_is_sfp_module_plugged(phy, params))
+				bnx2x_sfp_module_detection(phy, params);
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+			if (vars->line_speed != SPEED_20000) {
+				DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+				return;
+			}
+			DP(NETIF_MSG_LINK, "Setting 20G DXGXS\n");
+			bnx2x_warpcore_set_20G_DXGXS(bp, phy, lane);
+			/* Issue Module detection */
+			bnx2x_sfp_module_detection(phy, params);
+			break;
+
+		case PORT_HW_CFG_NET_SERDES_IF_KR2:
+			if (vars->line_speed != SPEED_20000) {
+				DP(NETIF_MSG_LINK, "Speed not supported yet\n");
+				return;
+			}
+			DP(NETIF_MSG_LINK, "Setting 20G KR2\n");
+			bnx2x_warpcore_set_20G_KR2(bp, phy);
+			break;
+
+		default:
+			DP(NETIF_MSG_LINK, "Unsupported Serdes Net Interface "
+					   "0x%x\n", serdes_net_if);
+			return;
+		}
+	}
+
+	/* Take lane out of reset after configuration is finished */
+	bnx2x_warpcore_reset_lane(bp, phy, 0);
+	DP(NETIF_MSG_LINK, "Exit config init\n");
+}
+
+static void bnx2x_sfp_e3_set_transmitter(struct link_params *params,
+					 struct bnx2x_phy *phy,
+					 u8 tx_en)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+
+	cfg_pin = REG_RD(bp, params->shmem_base +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+				PORT_HW_CFG_TX_LASER_MASK;
+	/* Set the !tx_en since this pin is DISABLE_TX_LASER */
+	DP(NETIF_MSG_LINK, "Setting WC TX to %d\n", tx_en);
+	bnx2x_set_cfg_pin(bp, cfg_pin, tx_en ^ 1);
+	/* For 20G, the expected pin to be used is 3 pins after the current */
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)
+		bnx2x_set_cfg_pin(bp, cfg_pin + 3, tx_en ^ 1);
+}
+
+static void bnx2x_warpcore_link_reset(struct bnx2x_phy *phy,
+				      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16;
+	bnx2x_sfp_e3_set_transmitter(params, phy, 0);
+	bnx2x_set_mdio_clk(bp, params->chip_id, params->port);
+	bnx2x_set_aer_mmd(params, phy);
+	/* Global register */
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+
+	/* Clear loopback settings (if any) */
+	/* 10G & 20G */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 &
+			 0xBFFF);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 & 0xfffe);
+
+	/* Update those 1-copy registers */
+	CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+			  MDIO_AER_BLOCK_AER_REG, 0);
+	/* Enable 1G MDIO (1-copy) */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+			&val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+			 val16 & ~0x10);
+
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+			 val16 & 0xff00);
+}
+
+static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
+					struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val16;
+	u32 lane;
+	DP(NETIF_MSG_LINK, "Setting Warpcore loopback type %x, speed %d\n",
+		       params->loopback_mode, phy->req_line_speed);
+
+	if (phy->req_line_speed < SPEED_10000) {
+		/* 10/100/1000 */
+
+		/* Update those 1-copy registers */
+		CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+				  MDIO_AER_BLOCK_AER_REG, 0);
+		/* Enable 1G MDIO (1-copy) */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+				&val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK0_XGXSCONTROL,
+				val16 | 0x10);
+		/* Set 1G loopback based on lane (1-copy) */
+		lane = bnx2x_get_warpcore_lane(phy, params);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK1_LANECTRL2, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_XGXSBLK1_LANECTRL2,
+				val16 | (1<<lane));
+
+		/* Switch back to 4-copy registers */
+		bnx2x_set_aer_mmd(params, phy);
+		/* Global loopback, not recommended. */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+				0x4000);
+	} else {
+		/* 10G & 20G */
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
+				 0x4000);
+
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16);
+		bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1);
+	}
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_10g_plus;
+	u8 port = params->port;
+	u32 sync_offset, media_types;
+	/* Update PHY configuration */
+	set_phy_vars(params, vars);
+
+	vars->link_status = REG_RD(bp, params->shmem_base +
+				   offsetof(struct shmem_region,
+					    port_mb[port].link_status));
+
+	vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
+	vars->phy_flags = PHY_XGXS_FLAG;
+	if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+		vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+	if (vars->link_up) {
+		DP(NETIF_MSG_LINK, "phy link up\n");
+
+		vars->phy_link_up = 1;
+		vars->duplex = DUPLEX_FULL;
+		switch (vars->link_status &
+			LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
+		case LINK_10THD:
+			vars->duplex = DUPLEX_HALF;
+			/* fall thru */
+		case LINK_10TFD:
+			vars->line_speed = SPEED_10;
+			break;
+
+		case LINK_100TXHD:
+			vars->duplex = DUPLEX_HALF;
+			/* fall thru */
+		case LINK_100T4:
+		case LINK_100TXFD:
+			vars->line_speed = SPEED_100;
+			break;
+
+		case LINK_1000THD:
+			vars->duplex = DUPLEX_HALF;
+			/* fall thru */
+		case LINK_1000TFD:
+			vars->line_speed = SPEED_1000;
+			break;
+
+		case LINK_2500THD:
+			vars->duplex = DUPLEX_HALF;
+			/* fall thru */
+		case LINK_2500TFD:
+			vars->line_speed = SPEED_2500;
+			break;
+
+		case LINK_10GTFD:
+			vars->line_speed = SPEED_10000;
+			break;
+		case LINK_20GTFD:
+			vars->line_speed = SPEED_20000;
+			break;
+		default:
+			break;
+		}
+		vars->flow_ctrl = 0;
+		if (vars->link_status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED)
+			vars->flow_ctrl |= BNX2X_FLOW_CTRL_TX;
+
+		if (vars->link_status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED)
+			vars->flow_ctrl |= BNX2X_FLOW_CTRL_RX;
+
+		if (!vars->flow_ctrl)
+			vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+		if (vars->line_speed &&
+		    ((vars->line_speed == SPEED_10) ||
+		     (vars->line_speed == SPEED_100))) {
+			vars->phy_flags |= PHY_SGMII_FLAG;
+		} else {
+			vars->phy_flags &= ~PHY_SGMII_FLAG;
+		}
+		if (vars->line_speed &&
+		    USES_WARPCORE(bp) &&
+		    (vars->line_speed == SPEED_1000))
+			vars->phy_flags |= PHY_SGMII_FLAG;
+		/* Anything 10G and over uses the bmac (xmac on warpcore) */
+		link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+		if (link_10g_plus) {
+			if (USES_WARPCORE(bp))
+				vars->mac_type = MAC_TYPE_XMAC;
+			else
+				vars->mac_type = MAC_TYPE_BMAC;
+		} else {
+			if (USES_WARPCORE(bp))
+				vars->mac_type = MAC_TYPE_UMAC;
+			else
+				vars->mac_type = MAC_TYPE_EMAC;
+		}
+	} else { /* link down */
+		DP(NETIF_MSG_LINK, "phy link down\n");
+
+		vars->phy_link_up = 0;
+
+		vars->line_speed = 0;
+		vars->duplex = DUPLEX_FULL;
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+		/* indicate no mac active */
+		vars->mac_type = MAC_TYPE_NONE;
+		if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
+			vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+	}
+
+	/* Sync media type */
+	sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].media_type);
+	media_types = REG_RD(bp, sync_offset);
+
+	params->phy[INT_PHY].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY0_SHIFT;
+	params->phy[EXT_PHY1].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY1_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT;
+	params->phy[EXT_PHY2].media_type =
+		(media_types & PORT_HW_CFG_MEDIA_TYPE_PHY2_MASK) >>
+		PORT_HW_CFG_MEDIA_TYPE_PHY2_SHIFT;
+	DP(NETIF_MSG_LINK, "media_types = 0x%x\n", media_types);
+
+	/* Sync AEU offset */
+	sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+				 dev_info.port_hw_config[port].aeu_int_mask);
+
+	vars->aeu_int_mask = REG_RD(bp, sync_offset);
+
+	/* Sync PFC status */
+	if (vars->link_status & LINK_STATUS_PFC_ENABLED)
+		params->feature_config_flags |=
+					FEATURE_CONFIG_PFC_ENABLED;
+	else
+		params->feature_config_flags &=
+					~FEATURE_CONFIG_PFC_ENABLED;
+
+	DP(NETIF_MSG_LINK, "link_status 0x%x  phy_link_up %x int_mask 0x%x\n",
+		 vars->link_status, vars->phy_link_up, vars->aeu_int_mask);
+	DP(NETIF_MSG_LINK, "line_speed %x  duplex %x  flow_ctrl 0x%x\n",
+		 vars->line_speed, vars->duplex, vars->flow_ctrl);
+}
+
+static void bnx2x_set_master_ln(struct link_params *params,
+				struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	u16 new_master_ln, ser_lane;
+	ser_lane = ((params->lane_config &
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+		    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+
+	/* set the master_ln for AN */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+			  MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			  &new_master_ln);
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_XGXS_BLOCK2,
+			  MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
+			  (new_master_ln | ser_lane));
+}
+
+static int bnx2x_reset_unicore(struct link_params *params,
+			       struct bnx2x_phy *phy,
+			       u8 set_serdes)
+{
+	struct bnx2x *bp = params->bp;
+	u16 mii_control;
+	u16 i;
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
+
+	/* reset the unicore */
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL,
+			  (mii_control |
+			   MDIO_COMBO_IEEO_MII_CONTROL_RESET));
+	if (set_serdes)
+		bnx2x_set_serdes_access(bp, params->port);
+
+	/* wait for the reset to self clear */
+	for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
+		udelay(5);
+
+		/* the reset erased the previous bank value */
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+
+		if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
+			udelay(5);
+			return 0;
+		}
+	}
+
+	netdev_err(bp->dev, "Warning: PHY was not initialized, Port %d\n",
+		   params->port);
+	DP(NETIF_MSG_LINK, "BUG! XGXS is still in reset!\n");
+	return -EINVAL;
+}
+
+static void bnx2x_set_swap_lanes(struct link_params *params,
+				 struct bnx2x_phy *phy)
+{
+	struct bnx2x *bp = params->bp;
+	/*
+	 * Each two bits represents a lane number:
+	 * no swap is 0123 => 0x1b, so there is no need to enable the swap
+	 */
+	u16 ser_lane, rx_lane_swap, tx_lane_swap;
+
+	ser_lane = ((params->lane_config &
+		     PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+		    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+	rx_lane_swap = ((params->lane_config &
+			 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
+			PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
+	tx_lane_swap = ((params->lane_config &
+			 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
+			PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
+
+	if (rx_lane_swap != 0x1b) {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_RX_LN_SWAP,
+				  (rx_lane_swap |
+				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
+				   MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
+	} else {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
+	}
+
+	if (tx_lane_swap != 0x1b) {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_TX_LN_SWAP,
+				  (tx_lane_swap |
+				   MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
+	} else {
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
+	}
+}
+
+static void bnx2x_set_parallel_detection(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 control2;
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			  &control2);
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+		control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	else
+		control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
+	DP(NETIF_MSG_LINK, "phy->speed_cap_mask = 0x%x, control2 = 0x%x\n",
+		phy->speed_cap_mask, control2);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
+			  control2);
+
+	if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+	     (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+		DP(NETIF_MSG_LINK, "XGXS\n");
+
+		CL22_WR_OVER_CL45(bp, phy,
+				 MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
+				 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
+
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				  &control2);
+
+		control2 |=
+		    MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
+
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+				  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
+				  control2);
+
+		/* Disable parallel detection of HiG */
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_XGXS_BLOCK2,
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
+				  MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
+	}
+}
+
+static void bnx2x_set_autoneg(struct bnx2x_phy *phy,
+			      struct link_params *params,
+			      struct link_vars *vars,
+			      u8 enable_cl73)
+{
+	struct bnx2x *bp = params->bp;
+	u16 reg_val;
+
+	/* CL37 Autoneg */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+
+	/* CL37 Autoneg Enabled */
+	if (vars->line_speed == SPEED_AUTO_NEG)
+		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
+	else /* CL37 Autoneg Disabled */
+		reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+			     MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+	/* Enable/Disable Autodetection */
+
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
+	reg_val &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN |
+		    MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT);
+	reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE;
+	if (vars->line_speed == SPEED_AUTO_NEG)
+		reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+	else
+		reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
+
+	/* Enable TetonII and BAM autoneg */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_BAM_NEXT_PAGE,
+			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+			  &reg_val);
+	if (vars->line_speed == SPEED_AUTO_NEG) {
+		/* Enable BAM aneg Mode and TetonII aneg Mode */
+		reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+			    MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+	} else {
+		/* TetonII and BAM Autoneg Disabled */
+		reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
+			     MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
+	}
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_BAM_NEXT_PAGE,
+			  MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
+			  reg_val);
+
+	if (enable_cl73) {
+		/* Enable Cl73 FSM status bits */
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_USERB0,
+				  MDIO_CL73_USERB0_CL73_UCTRL,
+				  0xe);
+
+		/* Enable BAM Station Manager*/
+		CL22_WR_OVER_CL45(bp, phy,
+			MDIO_REG_BANK_CL73_USERB0,
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1,
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
+			MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
+
+		/* Advertise CL73 link speeds */
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_ADV2,
+				  &reg_val);
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
+		if (phy->speed_cap_mask &
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
+			reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
+
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB1,
+				  MDIO_CL73_IEEEB1_AN_ADV2,
+				  reg_val);
+
+		/* CL73 Autoneg Enabled */
+		reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
+
+	} else /* CL73 Autoneg Disabled */
+		reg_val = 0;
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB0,
+			  MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
+}
+
+/* program SerDes, forced speed */
+static void bnx2x_program_serdes(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 reg_val;
+
+	/* Program duplex, disable autoneg and sgmii */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
+	reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
+		     MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+		     MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK);
+	if (phy->req_duplex == DUPLEX_FULL)
+		reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
+
+	/*
+	 * program speed
+	 *  - needed only if the speed is greater than 1G (2.5G or 10G)
+	 */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_MISC1, &reg_val);
+	/* clearing the speed value before setting the right speed */
+	DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val);
+
+	reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK |
+		     MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+
+	if (!((vars->line_speed == SPEED_1000) ||
+	      (vars->line_speed == SPEED_100) ||
+	      (vars->line_speed == SPEED_10))) {
+
+		reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
+			    MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
+		if (vars->line_speed == SPEED_10000)
+			reg_val |=
+				MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
+	}
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_MISC1, reg_val);
+
+}
+
+static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy,
+					      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0;
+
+	/* configure the 48 bits for BAM AN */
+
+	/* set extended capabilities */
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G)
+		val |= MDIO_OVER_1G_UP1_2_5G;
+	if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+		val |= MDIO_OVER_1G_UP1_10G;
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_UP1, val);
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_UP3, 0x400);
+}
+
+static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy,
+					      struct link_params *params,
+					      u16 ieee_fc)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	/* for AN, we are always publishing full duplex */
+
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_COMBO_IEEE0,
+			  MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB1,
+			  MDIO_CL73_IEEEB1_AN_ADV1, &val);
+	val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
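+	/*
+	 * The CL73 pause bits sit three bit positions above their CL37
+	 * counterparts, so the CL37 advertisement is translated into the
+	 * CL73 register by a shift and mask.
+	 */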
+	val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB1,
+			  MDIO_CL73_IEEEB1_AN_ADV1, val);
+}
+
+static void bnx2x_restart_autoneg(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  u8 enable_cl73)
+{
+	struct bnx2x *bp = params->bp;
+	u16 mii_control;
+
+	DP(NETIF_MSG_LINK, "bnx2x_restart_autoneg\n");
+	/* Enable and restart BAM/CL37 aneg */
+
+	if (enable_cl73) {
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  &mii_control);
+
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  (mii_control |
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
+	} else {
+
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+		DP(NETIF_MSG_LINK,
+			 "bnx2x_restart_autoneg mii_control before = 0x%x\n",
+			 mii_control);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  (mii_control |
+				   MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+				   MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
+	}
+}
+
+static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy,
+					   struct link_params *params,
+					   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 control1;
+
+	/* in SGMII mode, the unicore is always slave */
+
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			  &control1);
+	control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
+	/* set sgmii mode (and not fiber) */
+	control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
+		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
+		      MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
+			  control1);
+
+	/* if forced speed */
+	if (!(vars->line_speed == SPEED_AUTO_NEG)) {
+		/* set speed, disable autoneg */
+		u16 mii_control;
+
+		CL22_RD_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  &mii_control);
+		mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
+				 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
+				 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
+
+		switch (vars->line_speed) {
+		case SPEED_100:
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
+			break;
+		case SPEED_1000:
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
+			break;
+		case SPEED_10:
+			/* there is nothing to set for 10M */
+			break;
+		default:
+			/* invalid speed for SGMII */
+			DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+				  vars->line_speed);
+			break;
+		}
+
+		/* setting the full duplex */
+		if (phy->req_duplex == DUPLEX_FULL)
+			mii_control |=
+				MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_COMBO_IEEE0,
+				  MDIO_COMBO_IEEE0_MII_CONTROL,
+				  mii_control);
+
+	} else { /* AN mode */
+		/* enable and restart AN */
+		bnx2x_restart_autoneg(phy, params, 0);
+	}
+}
+
+/*
+ * link management
+ */
+
+static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
+					     struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 pd_10g, status2_1000x;
+	if (phy->req_line_speed != SPEED_AUTO_NEG)
+		return 0;
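+	/*
+	 * Read the latched 1000X status twice so the second read returns
+	 * the current state rather than a stale latched value (the usual
+	 * clear-on-read behavior is assumed here).
+	 */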
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			  &status2_1000x);
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_SERDES_DIGITAL,
+			  MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
+			  &status2_1000x);
+	if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
+		DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_10G_PARALLEL_DETECT,
+			  MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
+			  &pd_10g);
+
+	if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
+		DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
+			 params->port);
+		return 1;
+	}
+	return 0;
+}
+
+static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
+				    struct link_params *params,
+				    struct link_vars *vars,
+				    u32 gp_status)
+{
+	struct bnx2x *bp = params->bp;
+	u16 ld_pause;   /* local driver */
+	u16 lp_pause;   /* link partner */
+	u16 pause_result;
+
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+
+	/* resolve from gp_status in case of AN complete and not sgmii */
+	if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+		vars->flow_ctrl = phy->req_flow_ctrl;
+	else if (phy->req_line_speed != SPEED_AUTO_NEG)
+		vars->flow_ctrl = params->req_fc_auto_adv;
+	else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
+		 (!(vars->phy_flags & PHY_SGMII_FLAG))) {
+		if (bnx2x_direct_parallel_detect_used(phy, params)) {
+			vars->flow_ctrl = params->req_fc_auto_adv;
+			return;
+		}
+		if ((gp_status &
+		    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+		     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+		    (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+		     MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+			CL22_RD_OVER_CL45(bp, phy,
+					  MDIO_REG_BANK_CL73_IEEEB1,
+					  MDIO_CL73_IEEEB1_AN_ADV1,
+					  &ld_pause);
+			CL22_RD_OVER_CL45(bp, phy,
+					  MDIO_REG_BANK_CL73_IEEEB1,
+					  MDIO_CL73_IEEEB1_AN_LP_ADV1,
+					  &lp_pause);
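+			/*
+			 * Fold the local and partner pause bits into a single
+			 * code word (local in the upper bits, partner in the
+			 * lower) for bnx2x_pause_resolve() below.
+			 */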
+			pause_result = (ld_pause &
+					MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
+					>> 8;
+			pause_result |= (lp_pause &
+					MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
+					>> 10;
+			DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
+				 pause_result);
+		} else {
+			CL22_RD_OVER_CL45(bp, phy,
+					  MDIO_REG_BANK_COMBO_IEEE0,
+					  MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+					  &ld_pause);
+			CL22_RD_OVER_CL45(bp, phy,
+				MDIO_REG_BANK_COMBO_IEEE0,
+				MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+				&lp_pause);
+			pause_result = (ld_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+			pause_result |= (lp_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+			DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
+				 pause_result);
+		}
+		bnx2x_pause_resolve(vars, pause_result);
+	}
+	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
+}
+
+static void bnx2x_check_fallback_to_cl37(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 rx_status, ustat_val, cl37_fsm_received;
+	DP(NETIF_MSG_LINK, "bnx2x_check_fallback_to_cl37\n");
+	/* Step 1: Make sure signal is detected */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_RX0,
+			  MDIO_RX0_RX_STATUS,
+			  &rx_status);
+	if ((rx_status & MDIO_RX0_RX_STATUS_SIGDET) !=
+	    (MDIO_RX0_RX_STATUS_SIGDET)) {
+		DP(NETIF_MSG_LINK, "Signal is not detected. Restoring CL73."
+			     "rx_status(0x80b0) = 0x%x\n", rx_status);
+		CL22_WR_OVER_CL45(bp, phy,
+				  MDIO_REG_BANK_CL73_IEEEB0,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+				  MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN);
+		return;
+	}
+	/* Step 2: Check CL73 state machine */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_USERB0,
+			  MDIO_CL73_USERB0_CL73_USTAT1,
+			  &ustat_val);
+	if ((ustat_val &
+	     (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+	      MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) !=
+	    (MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK |
+	      MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37)) {
+		DP(NETIF_MSG_LINK, "CL73 state-machine is not stable. "
+			     "ustat_val(0x8371) = 0x%x\n", ustat_val);
+		return;
+	}
+	/*
+	 * Step 3: Check CL37 Message Pages received to indicate LP
+	 * supports only CL37
+	 */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_REMOTE_PHY,
+			  MDIO_REMOTE_PHY_MISC_RX_STATUS,
+			  &cl37_fsm_received);
+	if ((cl37_fsm_received &
+	     (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+	     MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) !=
+	    (MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG |
+	      MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG)) {
+		DP(NETIF_MSG_LINK, "No CL37 FSM were received. "
+			     "misc_rx_status(0x8330) = 0x%x\n",
+			 cl37_fsm_received);
+		return;
+	}
+	/*
+	 * The combined cl37/cl73 fsm state information indicates that
+	 * we are connected to a device which does not support cl73, but
+	 * does support cl37 BAM. In this case we disable cl73 and
+	 * restart cl37 auto-neg
+	 */
+
+	/* Disable CL73 */
+	CL22_WR_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_CL73_IEEEB0,
+			  MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
+			  0);
+	/* Restart CL37 autoneg */
+	bnx2x_restart_autoneg(phy, params, 0);
+	DP(NETIF_MSG_LINK, "Disabling CL73, and restarting CL37 autoneg\n");
+}
+
+static void bnx2x_xgxs_an_resolve(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars,
+				  u32 gp_status)
+{
+	if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
+		vars->link_status |=
+			LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+	if (bnx2x_direct_parallel_detect_used(phy, params))
+		vars->link_status |=
+			LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       struct link_vars *vars,
+				       u16 is_link_up,
+				       u16 speed_mask,
+				       u16 is_duplex)
+{
+	struct bnx2x *bp = params->bp;
+	if (phy->req_line_speed == SPEED_AUTO_NEG)
+		vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;
+	if (is_link_up) {
+		DP(NETIF_MSG_LINK, "phy link up\n");
+
+		vars->phy_link_up = 1;
+		vars->link_status |= LINK_STATUS_LINK_UP;
+
+		switch (speed_mask) {
+		case GP_STATUS_10M:
+			vars->line_speed = SPEED_10;
+			if (vars->duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_10TFD;
+			else
+				vars->link_status |= LINK_10THD;
+			break;
+
+		case GP_STATUS_100M:
+			vars->line_speed = SPEED_100;
+			if (vars->duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_100TXFD;
+			else
+				vars->link_status |= LINK_100TXHD;
+			break;
+
+		case GP_STATUS_1G:
+		case GP_STATUS_1G_KX:
+			vars->line_speed = SPEED_1000;
+			if (vars->duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_1000TFD;
+			else
+				vars->link_status |= LINK_1000THD;
+			break;
+
+		case GP_STATUS_2_5G:
+			vars->line_speed = SPEED_2500;
+			if (vars->duplex == DUPLEX_FULL)
+				vars->link_status |= LINK_2500TFD;
+			else
+				vars->link_status |= LINK_2500THD;
+			break;
+
+		case GP_STATUS_5G:
+		case GP_STATUS_6G:
+			DP(NETIF_MSG_LINK,
+				 "link speed unsupported gp_status 0x%x\n",
+				  speed_mask);
+			return -EINVAL;
+
+		case GP_STATUS_10G_KX4:
+		case GP_STATUS_10G_HIG:
+		case GP_STATUS_10G_CX4:
+		case GP_STATUS_10G_KR:
+		case GP_STATUS_10G_SFI:
+		case GP_STATUS_10G_XFI:
+			vars->line_speed = SPEED_10000;
+			vars->link_status |= LINK_10GTFD;
+			break;
+		case GP_STATUS_20G_DXGXS:
+			vars->line_speed = SPEED_20000;
+			vars->link_status |= LINK_20GTFD;
+			break;
+		default:
+			DP(NETIF_MSG_LINK,
+				  "link speed unsupported gp_status 0x%x\n",
+				  speed_mask);
+			return -EINVAL;
+		}
+	} else { /* link_down */
+		DP(NETIF_MSG_LINK, "phy link down\n");
+
+		vars->phy_link_up = 0;
+
+		vars->duplex = DUPLEX_FULL;
+		vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		vars->mac_type = MAC_TYPE_NONE;
+	}
+	DP(NETIF_MSG_LINK, " phy_link_up %x line_speed %d\n",
+		    vars->phy_link_up, vars->line_speed);
+	return 0;
+}
+
+static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+
+	struct bnx2x *bp = params->bp;
+
+	u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
+	int rc = 0;
+
+	/* Read gp_status */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_GP_STATUS,
+			  MDIO_GP_STATUS_TOP_AN_STATUS1,
+			  &gp_status);
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
+		duplex = DUPLEX_FULL;
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)
+		link_up = 1;
+	speed_mask = gp_status & GP_STATUS_SPEED_MASK;
+	DP(NETIF_MSG_LINK, "gp_status 0x%x, is_link_up %d, speed_mask 0x%x\n",
+		       gp_status, link_up, speed_mask);
+	rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, speed_mask,
+					 duplex);
+	if (rc == -EINVAL)
+		return rc;
+
+	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
+		if (SINGLE_MEDIA_DIRECT(params)) {
+			bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status);
+			if (phy->req_line_speed == SPEED_AUTO_NEG)
+				bnx2x_xgxs_an_resolve(phy, params, vars,
+						      gp_status);
+		}
+	} else { /* link_down */
+		if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		    SINGLE_MEDIA_DIRECT(params)) {
+			/* Check signal is detected */
+			bnx2x_check_fallback_to_cl37(phy, params);
+		}
+	}
+
+	DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
+		   vars->duplex, vars->flow_ctrl, vars->link_status);
+	return rc;
+}
+
+static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     struct link_vars *vars)
+{
+
+	struct bnx2x *bp = params->bp;
+
+	u8 lane;
+	u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
+	int rc = 0;
+	lane = bnx2x_get_warpcore_lane(phy, params);
+	/* Read gp_status */
+	if (phy->req_line_speed > SPEED_10000) {
+		u16 temp_link_up;
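+		/*
+		 * Read the PCS status twice; the first read returns the
+		 * latched value and the second the live state (clear-on-read
+		 * behavior assumed).
+		 */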
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				1, &temp_link_up);
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				1, &link_up);
+		DP(NETIF_MSG_LINK, "PCS RX link status = 0x%x-->0x%x\n",
+			       temp_link_up, link_up);
+		link_up &= (1<<2);
+		if (link_up)
+			bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_1, &gp_status1);
+		DP(NETIF_MSG_LINK, "0x81d1 = 0x%x\n", gp_status1);
+		/* Check for either KR or generic link up. */
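+		/*
+		 * Bits 15:12 and 11:8 each carry a per-lane link indication;
+		 * OR the two nibbles so the lane bit below is set if either
+		 * mode reports link (nibble layout inferred from this usage).
+		 */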
+		gp_status1 = ((gp_status1 >> 8) & 0xf) |
+			((gp_status1 >> 12) & 0xf);
+		link_up = gp_status1 & (1 << lane);
+		if (link_up && SINGLE_MEDIA_DIRECT(params)) {
+			u16 pd, gp_status4;
+			if (phy->req_line_speed == SPEED_AUTO_NEG) {
+				/* Check Autoneg complete */
+				bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+						MDIO_WC_REG_GP2_STATUS_GP_2_4,
+						&gp_status4);
+				if (gp_status4 & ((1<<12)<<lane))
+					vars->link_status |=
+					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+
+				/* Check parallel detect used */
+				bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+						MDIO_WC_REG_PAR_DET_10G_STATUS,
+						&pd);
+				if (pd & (1<<15))
+					vars->link_status |=
+					LINK_STATUS_PARALLEL_DETECTION_USED;
+			}
+			bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		}
+	}
+
+	if (lane < 2) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
+	} else {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_3, &gp_speed);
+	}
+	DP(NETIF_MSG_LINK, "lane %d gp_speed 0x%x\n", lane, gp_speed);
+
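+	/*
+	 * Even lanes report their speed in the low byte of the GP_2
+	 * register; shift it up so the speed field is always decoded
+	 * from bits 13:8 (mask 0x3f00).
+	 */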
+	if ((lane & 1) == 0)
+		gp_speed <<= 8;
+	gp_speed &= 0x3f00;
+
+	rc = bnx2x_get_link_speed_duplex(phy, params, vars, link_up, gp_speed,
+					 duplex);
+
+	DP(NETIF_MSG_LINK, "duplex %x  flow_ctrl 0x%x link_status 0x%x\n",
+		   vars->duplex, vars->flow_ctrl, vars->link_status);
+	return rc;
+}
+
+static void bnx2x_set_gmii_tx_driver(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy = &params->phy[INT_PHY];
+	u16 lp_up2;
+	u16 tx_driver;
+	u16 bank;
+
+	/* read precomp */
+	CL22_RD_OVER_CL45(bp, phy,
+			  MDIO_REG_BANK_OVER_1G,
+			  MDIO_OVER_1G_LP_UP2, &lp_up2);
+
+	/* bits [10:7] at lp_up2, positioned at [15:12] */
+	lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
+		   MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
+		  MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
+
+	if (lp_up2 == 0)
+		return;
+
+	for (bank = MDIO_REG_BANK_TX0; bank <= MDIO_REG_BANK_TX3;
+	      bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0)) {
+		CL22_RD_OVER_CL45(bp, phy,
+				  bank,
+				  MDIO_TX0_TX_DRIVER, &tx_driver);
+
+		/* replace tx_driver bits [15:12] */
+		if (lp_up2 !=
+		    (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) {
+			tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
+			tx_driver |= lp_up2;
+			CL22_WR_OVER_CL45(bp, phy,
+					  bank,
+					  MDIO_TX0_TX_DRIVER, tx_driver);
+		}
+	}
+}
+
+static int bnx2x_emac_program(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u16 mode = 0;
+
+	DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
+	bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 +
+		       EMAC_REG_EMAC_MODE,
+		       (EMAC_MODE_25G_MODE |
+			EMAC_MODE_PORT_MII_10M |
+			EMAC_MODE_HALF_DUPLEX));
+	switch (vars->line_speed) {
+	case SPEED_10:
+		mode |= EMAC_MODE_PORT_MII_10M;
+		break;
+
+	case SPEED_100:
+		mode |= EMAC_MODE_PORT_MII;
+		break;
+
+	case SPEED_1000:
+		mode |= EMAC_MODE_PORT_GMII;
+		break;
+
+	case SPEED_2500:
+		mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
+		break;
+
+	default:
+		/* 10G not valid for EMAC */
+		DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n",
+			   vars->line_speed);
+		return -EINVAL;
+	}
+
+	if (vars->duplex == DUPLEX_HALF)
+		mode |= EMAC_MODE_HALF_DUPLEX;
+	bnx2x_bits_en(bp,
+		      GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
+		      mode);
+
+	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+	return 0;
+}
+
+static void bnx2x_set_preemphasis(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+
+	u16 bank, i = 0;
+	struct bnx2x *bp = params->bp;
+
+	for (bank = MDIO_REG_BANK_RX0, i = 0; bank <= MDIO_REG_BANK_RX3;
+	     bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0), i++) {
+		CL22_WR_OVER_CL45(bp, phy,
+				  bank,
+				  MDIO_RX0_RX_EQ_BOOST,
+				  phy->rx_preemphasis[i]);
+	}
+
+	for (bank = MDIO_REG_BANK_TX0, i = 0; bank <= MDIO_REG_BANK_TX3;
+	     bank += (MDIO_REG_BANK_TX1 - MDIO_REG_BANK_TX0), i++) {
+		CL22_WR_OVER_CL45(bp, phy,
+				  bank,
+				  MDIO_TX0_TX_DRIVER,
+				  phy->tx_preemphasis[i]);
+	}
+}
+
+static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 enable_cl73 = (SINGLE_MEDIA_DIRECT(params) ||
+			  (params->loopback_mode == LOOPBACK_XGXS));
+	if (!(vars->phy_flags & PHY_SGMII_FLAG)) {
+		if (SINGLE_MEDIA_DIRECT(params) &&
+		    (params->feature_config_flags &
+		     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED))
+			bnx2x_set_preemphasis(phy, params);
+
+		/* forced speed requested? */
+		if (vars->line_speed != SPEED_AUTO_NEG ||
+		    (SINGLE_MEDIA_DIRECT(params) &&
+		     params->loopback_mode == LOOPBACK_EXT)) {
+			DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
+
+			/* disable autoneg */
+			bnx2x_set_autoneg(phy, params, vars, 0);
+
+			/* program speed and duplex */
+			bnx2x_program_serdes(phy, params, vars);
+
+		} else { /* AN_mode */
+			DP(NETIF_MSG_LINK, "not SGMII, AN\n");
+
+			/* AN enabled */
+			bnx2x_set_brcm_cl37_advertisement(phy, params);
+
+			/* program duplex & pause advertisement (for aneg) */
+			bnx2x_set_ieee_aneg_advertisement(phy, params,
+							  vars->ieee_fc);
+
+			/* enable autoneg */
+			bnx2x_set_autoneg(phy, params, vars, enable_cl73);
+
+			/* enable and restart AN */
+			bnx2x_restart_autoneg(phy, params, enable_cl73);
+		}
+
+	} else { /* SGMII mode */
+		DP(NETIF_MSG_LINK, "SGMII\n");
+
+		bnx2x_initialize_sgmii_process(phy, params, vars);
+	}
+}
+
+static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy,
+			      struct link_params *params,
+			      struct link_vars *vars)
+{
+	int rc;
+	vars->phy_flags |= PHY_XGXS_FLAG;
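+	/*
+	 * Select SGMII mode when a 10/100 speed is explicitly requested,
+	 * when the capability mask allows 10/100 but nothing at or above
+	 * 1G, or when the SerDes is directly attached.
+	 */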
+	if ((phy->req_line_speed &&
+	     ((phy->req_line_speed == SPEED_100) ||
+	      (phy->req_line_speed == SPEED_10))) ||
+	    (!phy->req_line_speed &&
+	     (phy->speed_cap_mask >=
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) &&
+	     (phy->speed_cap_mask <
+	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->type == PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT_SD))
+		vars->phy_flags |= PHY_SGMII_FLAG;
+	else
+		vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	bnx2x_set_aer_mmd(params, phy);
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+		bnx2x_set_master_ln(params, phy);
+
+	/* Reset the unicore and wait for the reset bit to return low */
+	rc = bnx2x_reset_unicore(params, phy, 0);
+	if (rc != 0)
+		return rc;
+
+	bnx2x_set_aer_mmd(params, phy);
+	/* setting the masterLn_def again after the reset */
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
+		bnx2x_set_master_ln(params, phy);
+		bnx2x_set_swap_lanes(params, phy);
+	}
+
+	return rc;
+}
+
+static u16 bnx2x_wait_reset_complete(struct bnx2x *bp,
+				     struct bnx2x_phy *phy,
+				     struct link_params *params)
+{
+	u16 cnt, ctrl;
+	/* Wait up to 1 sec for the soft reset to clear */
+	for (cnt = 0; cnt < 1000; cnt++) {
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE)
+			bnx2x_cl22_read(bp, phy,
+				MDIO_PMA_REG_CTRL, &ctrl);
+		else
+			bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, &ctrl);
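+		/*
+		 * Bit 15 is the MDIO soft-reset bit; it self-clears once
+		 * the PHY has come out of reset.
+		 */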
+		if (!(ctrl & (1<<15)))
+			break;
+		msleep(1);
+	}
+
+	if (cnt == 1000)
+		netdev_err(bp->dev, "Warning: PHY was not initialized,"
+				     " Port %d\n",
+			   params->port);
+	DP(NETIF_MSG_LINK, "control reg 0x%x (after %d ms)\n", ctrl, cnt);
+	return cnt;
+}
+
+static void bnx2x_link_int_enable(struct link_params *params)
+{
+	u8 port = params->port;
+	u32 mask;
+	struct bnx2x *bp = params->bp;
+
+	/* Setting the status to report on link up for either XGXS or SerDes */
+	if (CHIP_IS_E3(bp)) {
+		mask = NIG_MASK_XGXS0_LINK_STATUS;
+		if (!(SINGLE_MEDIA_DIRECT(params)))
+			mask |= NIG_MASK_MI_INT;
+	} else if (params->switch_cfg == SWITCH_CFG_10G) {
+		mask = (NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_XGXS0_LINK_STATUS);
+		DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
+		if (!(SINGLE_MEDIA_DIRECT(params)) &&
+			params->phy[INT_PHY].type !=
+				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) {
+			mask |= NIG_MASK_MI_INT;
+			DP(NETIF_MSG_LINK, "enabled external phy int\n");
+		}
+
+	} else { /* SerDes */
+		mask = NIG_MASK_SERDES0_LINK_STATUS;
+		DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
+		if (!(SINGLE_MEDIA_DIRECT(params)) &&
+			params->phy[INT_PHY].type !=
+				PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN) {
+			mask |= NIG_MASK_MI_INT;
+			DP(NETIF_MSG_LINK, "enabled external phy int\n");
+		}
+	}
+	bnx2x_bits_en(bp,
+		      NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		      mask);
+
+	DP(NETIF_MSG_LINK, "port %x, is_xgxs %x, int_status 0x%x\n", port,
+		 (params->switch_cfg == SWITCH_CFG_10G),
+		 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+	DP(NETIF_MSG_LINK, " int_mask 0x%x, MI_INT %x, SERDES_LINK %x\n",
+		 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+		 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
+		 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS+port*0x3c));
+	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+	   REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+}
+
+static void bnx2x_rearm_latch_signal(struct bnx2x *bp, u8 port,
+				     u8 exp_mi_int)
+{
+	u32 latch_status = 0;
+
+	/*
+	 * Disable the MI INT (external phy int) by writing 1 to the
+	 * status register. The link-down indication is an active-high
+	 * signal, so in this case we need to write the status to clear
+	 * the XOR
+	 */
+	/* Read Latched signals */
+	latch_status = REG_RD(bp,
+				    NIG_REG_LATCH_STATUS_0 + port*8);
+	DP(NETIF_MSG_LINK, "latch_status = 0x%x\n", latch_status);
+	/* Handle only those with latched-signal=up. */
+	if (exp_mi_int)
+		bnx2x_bits_en(bp,
+			      NIG_REG_STATUS_INTERRUPT_PORT0
+			      + port*4,
+			      NIG_STATUS_EMAC0_MI_INT);
+	else
+		bnx2x_bits_dis(bp,
+			       NIG_REG_STATUS_INTERRUPT_PORT0
+			       + port*4,
+			       NIG_STATUS_EMAC0_MI_INT);
+
+	if (latch_status & 1) {
+
+		/* For all latched-signal=up : Re-Arm Latch signals */
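+		/*
+		 * Writing the asserted bit back re-arms the latch for the
+		 * next transition (write-one-to-rearm behavior assumed).
+		 */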
+		REG_WR(bp, NIG_REG_LATCH_STATUS_0 + port*8,
+		       (latch_status & 0xfffe) | (latch_status & 1));
+	}
+	/* For all latched-signal=up, write original_signal to status */
+}
+
+static void bnx2x_link_int_ack(struct link_params *params,
+			       struct link_vars *vars, u8 is_10g_plus)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	u32 mask;
+	/*
+	 * First reset all status we assume only one line will be
+	 * change at a time
+	 */
+	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+		       (NIG_STATUS_XGXS0_LINK10G |
+			NIG_STATUS_XGXS0_LINK_STATUS |
+			NIG_STATUS_SERDES0_LINK_STATUS));
+	if (vars->phy_link_up) {
+		if (USES_WARPCORE(bp))
+			mask = NIG_STATUS_XGXS0_LINK_STATUS;
+		else {
+			if (is_10g_plus)
+				mask = NIG_STATUS_XGXS0_LINK10G;
+			else if (params->switch_cfg == SWITCH_CFG_10G) {
+				/*
+				 * Disable the link interrupt by writing 1 to
+				 * the relevant lane in the status register
+				 */
+				u32 ser_lane =
+					((params->lane_config &
+				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
+				    PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
+				mask = ((1 << ser_lane) <<
+				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE);
+			} else
+				mask = NIG_STATUS_SERDES0_LINK_STATUS;
+		}
+		DP(NETIF_MSG_LINK, "Ack link up interrupt with mask 0x%x\n",
+			       mask);
+		bnx2x_bits_en(bp,
+			      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
+			      mask);
+	}
+}
+
+static int bnx2x_format_ver(u32 num, u8 *str, u16 *len)
+{
+	u8 *str_ptr = str;
+	u32 mask = 0xf0000000;
+	u8 shift = 8*4;
+	u8 digit;
+	u8 remove_leading_zeros = 1;
+	if (*len < 10) {
+		/* Need at least 10 chars for this format */
+		*str_ptr = '\0';
+		(*len)--;
+		return -EINVAL;
+	}
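+	/*
+	 * Emit the 32-bit version as hex, nibble by nibble from the MSB,
+	 * inserting a '.' between the two 16-bit halves and stripping
+	 * leading zeros within each half.
+	 */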
+	while (shift > 0) {
+
+		shift -= 4;
+		digit = ((num & mask) >> shift);
+		if (digit == 0 && remove_leading_zeros) {
+			mask = mask >> 4;
+			continue;
+		} else if (digit < 0xa)
+			*str_ptr = digit + '0';
+		else
+			*str_ptr = digit - 0xa + 'a';
+		remove_leading_zeros = 0;
+		str_ptr++;
+		(*len)--;
+		mask = mask >> 4;
+		if (shift == 4*4) {
+			*str_ptr = '.';
+			str_ptr++;
+			(*len)--;
+			remove_leading_zeros = 1;
+		}
+	}
+	return 0;
+}
+
+static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+	str[0] = '\0';
+	(*len)--;
+	return 0;
+}
+
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+				 u8 *version, u16 len)
+{
+	struct bnx2x *bp;
+	u32 spirom_ver = 0;
+	int status = 0;
+	u8 *ver_p = version;
+	u16 remain_len = len;
+	if (version == NULL || params == NULL)
+		return -EINVAL;
+	bp = params->bp;
+
+	/* Extract first external phy */
+	version[0] = '\0';
+	spirom_ver = REG_RD(bp, params->phy[EXT_PHY1].ver_addr);
+
+	if (params->phy[EXT_PHY1].format_fw_ver) {
+		status |= params->phy[EXT_PHY1].format_fw_ver(spirom_ver,
+							      ver_p,
+							      &remain_len);
+		ver_p += (len - remain_len);
+	}
+	if ((params->num_phys == MAX_PHYS) &&
+	    (params->phy[EXT_PHY2].ver_addr != 0)) {
+		spirom_ver = REG_RD(bp, params->phy[EXT_PHY2].ver_addr);
+		if (params->phy[EXT_PHY2].format_fw_ver) {
+			*ver_p = '/';
+			ver_p++;
+			remain_len--;
+			status |= params->phy[EXT_PHY2].format_fw_ver(
+				spirom_ver,
+				ver_p,
+				&remain_len);
+			ver_p = version + (len - remain_len);
+		}
+	}
+	*ver_p = '\0';
+	return status;
+}
+
+static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy,
+				    struct link_params *params)
+{
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+
+	if (phy->req_line_speed != SPEED_1000) {
+		u32 md_devad = 0;
+
+		DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
+
+		if (!CHIP_IS_E3(bp)) {
+			/* change the uni_phy_addr in the nig */
+			md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD +
+					       port*0x18));
+
+			REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+			       0x5);
+		}
+
+		bnx2x_cl45_write(bp, phy,
+				 5,
+				 (MDIO_REG_BANK_AER_BLOCK +
+				  (MDIO_AER_BLOCK_AER_REG & 0xf)),
+				 0x2800);
+
+		bnx2x_cl45_write(bp, phy,
+				 5,
+				 (MDIO_REG_BANK_CL73_IEEEB0 +
+				  (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)),
+				 0x6041);
+		msleep(200);
+		/* set aer mmd back */
+		bnx2x_set_aer_mmd(params, phy);
+
+		if (!CHIP_IS_E3(bp)) {
+			/* and md_devad */
+			REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18,
+			       md_devad);
+		}
+	} else {
+		u16 mii_ctrl;
+		DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
+		bnx2x_cl45_read(bp, phy, 5,
+				(MDIO_REG_BANK_COMBO_IEEE0 +
+				(MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+				&mii_ctrl);
+		bnx2x_cl45_write(bp, phy, 5,
+				 (MDIO_REG_BANK_COMBO_IEEE0 +
+				 (MDIO_COMBO_IEEE0_MII_CONTROL & 0xf)),
+				 mii_ctrl |
+				 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK);
+	}
+}
+
+int bnx2x_set_led(struct link_params *params,
+		  struct link_vars *vars, u8 mode, u32 speed)
+{
+	u8 port = params->port;
+	u16 hw_led_mode = params->hw_led_mode;
+	int rc = 0;
+	u8 phy_idx;
+	u32 tmp;
+	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
+	DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
+		 speed, hw_led_mode);
+	/* Let any external phy that has its own LED handler set the LED */
+	for (phy_idx = EXT_PHY1; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].set_link_led) {
+			params->phy[phy_idx].set_link_led(
+				&params->phy[phy_idx], params, mode);
+		}
+	}
+
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 0);
+		REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+		       SHARED_HW_CFG_LED_MAC1);
+
+		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp | EMAC_LED_OVERRIDE));
+		break;
+
+	case LED_MODE_OPER:
+		/*
+		 * For all other phys, OPER mode is the same as ON, so in
+		 * case link is down, do nothing
+		 */
+		if (!vars->link_up)
+			break;
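+		/* Fall through: when link is up, OPER is handled like ON */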
+	case LED_MODE_ON:
+		if (((params->phy[EXT_PHY1].type ==
+			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
+			 (params->phy[EXT_PHY1].type ==
+			  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722)) &&
+		    CHIP_IS_E2(bp) && params->num_phys == 2) {
+			/*
+			 * This is a work-around for E2+8727 Configurations
+			 */
+			if (mode == LED_MODE_ON ||
+			    speed == SPEED_10000) {
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+				REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+
+				tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+				EMAC_WR(bp, EMAC_REG_EMAC_LED,
+					(tmp | EMAC_LED_OVERRIDE));
+				/*
+				 * Return here without enabling traffic
+				 * LED blink and setting rate in ON mode.
+				 * In oper mode, enabling LED blink
+				 * and setting rate is needed.
+				 */
+				if (mode == LED_MODE_ON)
+					return rc;
+			}
+		} else if (SINGLE_MEDIA_DIRECT(params)) {
+			/*
+			 * This is a work-around for HW issue found when link
+			 * is up in CL73
+			 */
+			REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
+			if (CHIP_IS_E1x(bp) ||
+			    CHIP_IS_E2(bp) ||
+			    (mode == LED_MODE_ON))
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
+			else
+				REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
+				       hw_led_mode);
+		} else
+			REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode);
+
+		REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);
+		/* Set blinking rate to ~15.9Hz */
+		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
+		       LED_BLINK_RATE_VAL);
+		REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 +
+		       port*4, 1);
+		tmp = EMAC_RD(bp, EMAC_REG_EMAC_LED);
+		EMAC_WR(bp, EMAC_REG_EMAC_LED, (tmp & (~EMAC_LED_OVERRIDE)));
+
+		if (CHIP_IS_E1(bp) &&
+		    ((speed == SPEED_2500) ||
+		     (speed == SPEED_1000) ||
+		     (speed == SPEED_100) ||
+		     (speed == SPEED_10))) {
+			/*
+			 * On Everest 1 Ax chip versions, for speeds less
+			 * than 10G the LED scheme is different
+			 */
+			REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0
+			       + port*4, 1);
+			REG_WR(bp, NIG_REG_LED_CONTROL_TRAFFIC_P0 +
+			       port*4, 0);
+			REG_WR(bp, NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 +
+			       port*4, 1);
+		}
+		break;
+
+	default:
+		rc = -EINVAL;
+		DP(NETIF_MSG_LINK, "bnx2x_set_led: Invalid led mode %d\n",
+			 mode);
+		break;
+	}
+	return rc;
+}
+
+/*
+ * This function reflects the actual link state, read DIRECTLY from
+ * the HW
+ */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+		    u8 is_serdes)
+{
+	struct bnx2x *bp = params->bp;
+	u16 gp_status = 0, phy_index = 0;
+	u8 ext_phy_link_up = 0, serdes_phy_type;
+	struct link_vars temp_vars;
+	struct bnx2x_phy *int_phy = &params->phy[INT_PHY];
+
+	if (CHIP_IS_E3(bp)) {
+		u16 link_up;
+		if (params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)]
+		    > SPEED_10000) {
+			/* Check 20G link */
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					1, &link_up);
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					1, &link_up);
+			link_up &= (1<<2);
+		} else {
+			/* Check 10G link and below*/
+			u8 lane = bnx2x_get_warpcore_lane(int_phy, params);
+			bnx2x_cl45_read(bp, int_phy, MDIO_WC_DEVAD,
+					MDIO_WC_REG_GP2_STATUS_GP_2_1,
+					&gp_status);
+			gp_status = ((gp_status >> 8) & 0xf) |
+				((gp_status >> 12) & 0xf);
+			link_up = gp_status & (1 << lane);
+		}
+		if (!link_up)
+			return -ESRCH;
+	} else {
+		CL22_RD_OVER_CL45(bp, int_phy,
+				  MDIO_REG_BANK_GP_STATUS,
+				  MDIO_GP_STATUS_TOP_AN_STATUS1,
+				  &gp_status);
+		/* link is up only if both local phy and external phy are up */
+		if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS))
+			return -ESRCH;
+	}
+	/* In XGXS loopback mode, do not check external PHY */
+	if (params->loopback_mode == LOOPBACK_XGXS)
+		return 0;
+
+	switch (params->num_phys) {
+	case 1:
+		/* No external PHY */
+		return 0;
+	case 2:
+		ext_phy_link_up = params->phy[EXT_PHY1].read_status(
+			&params->phy[EXT_PHY1],
+			params, &temp_vars);
+		break;
+	case 3: /* Dual Media */
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			serdes_phy_type = ((params->phy[phy_index].media_type ==
+					    ETH_PHY_SFP_FIBER) ||
+					   (params->phy[phy_index].media_type ==
+					    ETH_PHY_XFP_FIBER) ||
+					   (params->phy[phy_index].media_type ==
+					    ETH_PHY_DA_TWINAX));
+
+			if (is_serdes != serdes_phy_type)
+				continue;
+			if (params->phy[phy_index].read_status) {
+				ext_phy_link_up |=
+					params->phy[phy_index].read_status(
+						&params->phy[phy_index],
+						params, &temp_vars);
+			}
+		}
+		break;
+	}
+	if (ext_phy_link_up)
+		return 0;
+	return -ESRCH;
+}
+
+static int bnx2x_link_initialize(struct link_params *params,
+				 struct link_vars *vars)
+{
+	int rc = 0;
+	u8 phy_index, non_ext_phy;
+	struct bnx2x *bp = params->bp;
+	/*
+	 * In case an external phy exists, the line speed is the speed
+	 * negotiated by the external phy. In case the board is direct
+	 * only, the line_speed during initialization will be equal to
+	 * the req_line_speed
+	 */
+	vars->line_speed = params->phy[INT_PHY].req_line_speed;
+
+	/*
+	 * Initialize the internal phy in case this is a direct board
+	 * (no external phys), or this board has an external phy which
+	 * requires it to be initialized first.
+	 */
+	if (!USES_WARPCORE(bp))
+		bnx2x_prepare_xgxs(&params->phy[INT_PHY], params, vars);
+	/* init ext phy and enable link state int */
+	non_ext_phy = (SINGLE_MEDIA_DIRECT(params) ||
+		       (params->loopback_mode == LOOPBACK_XGXS));
+
+	if (non_ext_phy ||
+	    (params->phy[EXT_PHY1].flags & FLAGS_INIT_XGXS_FIRST) ||
+	    (params->loopback_mode == LOOPBACK_EXT_PHY)) {
+		struct bnx2x_phy *phy = &params->phy[INT_PHY];
+		if (vars->line_speed == SPEED_AUTO_NEG &&
+		    (CHIP_IS_E1x(bp) ||
+		     CHIP_IS_E2(bp)))
+			bnx2x_set_parallel_detection(phy, params);
+		if (params->phy[INT_PHY].config_init)
+			params->phy[INT_PHY].config_init(phy,
+							 params,
+							 vars);
+	}
+
+	/* Init external phy */
+	if (non_ext_phy) {
+		if (params->phy[INT_PHY].supported &
+		    SUPPORTED_FIBRE)
+			vars->link_status |= LINK_STATUS_SERDES_LINK;
+	} else {
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			/*
+			 * No need to initialize second phy in case of first
+			 * phy only selection. In case of second phy, we do
+			 * need to initialize the first phy, since they are
+			 * connected.
+			 */
+			if (params->phy[phy_index].supported &
+			    SUPPORTED_FIBRE)
+				vars->link_status |= LINK_STATUS_SERDES_LINK;
+
+			if (phy_index == EXT_PHY2 &&
+			    (bnx2x_phy_selection(params) ==
+			     PORT_HW_CFG_PHY_SELECTION_FIRST_PHY)) {
+				DP(NETIF_MSG_LINK, "Not initializing"
+						" second phy\n");
+				continue;
+			}
+			params->phy[phy_index].config_init(
+				&params->phy[phy_index],
+				params, vars);
+		}
+	}
+	/* Reset the interrupt indication after phy was initialized */
+	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 +
+		       params->port*4,
+		       (NIG_STATUS_XGXS0_LINK10G |
+			NIG_STATUS_XGXS0_LINK_STATUS |
+			NIG_STATUS_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+	bnx2x_update_mng(params, vars->link_status);
+	return rc;
+}
+
+static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
+				 struct link_params *params)
+{
+	/* reset the SerDes/XGXS */
+	REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
+	       (0x1ff << (params->port*16)));
+}
+
+static void bnx2x_common_ext_link_reset(struct bnx2x_phy *phy,
+					struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_port;
+	/* HW reset */
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
+	DP(NETIF_MSG_LINK, "reset external PHY\n");
+}
+
+static int bnx2x_update_link_down(struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+
+	DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
+	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+	vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG;
+	/* indicate no mac active */
+	vars->mac_type = MAC_TYPE_NONE;
+
+	/* update shared memory */
+	vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK |
+			       LINK_STATUS_LINK_UP |
+			       LINK_STATUS_PHYSICAL_LINK_FLAG |
+			       LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
+			       LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
+			       LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
+			       LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
+	vars->line_speed = 0;
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* activate nig drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+	/* disable emac */
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+	msleep(10);
+	/* reset BigMac/Xmac */
+	if (CHIP_IS_E1x(bp) ||
+	    CHIP_IS_E2(bp)) {
+		bnx2x_bmac_rx_disable(bp, params->port);
+		REG_WR(bp, GRCBASE_MISC +
+		       MISC_REGISTERS_RESET_REG_2_CLEAR,
+		       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+	}
+	if (CHIP_IS_E3(bp))
+		bnx2x_xmac_disable(params);
+
+	return 0;
+}
+
+static int bnx2x_update_link_up(struct link_params *params,
+				struct link_vars *vars,
+				u8 link_10g)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port = params->port;
+	int rc = 0;
+
+	vars->link_status |= (LINK_STATUS_LINK_UP |
+			      LINK_STATUS_PHYSICAL_LINK_FLAG);
+	vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
+
+	if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
+		vars->link_status |=
+			LINK_STATUS_TX_FLOW_CONTROL_ENABLED;
+
+	if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
+		vars->link_status |=
+			LINK_STATUS_RX_FLOW_CONTROL_ENABLED;
+	if (USES_WARPCORE(bp)) {
+		if (link_10g) {
+			if (bnx2x_xmac_enable(params, vars, 0) ==
+			    -ESRCH) {
+				DP(NETIF_MSG_LINK, "Found errors on XMAC\n");
+				vars->link_up = 0;
+				vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+				vars->link_status &= ~LINK_STATUS_LINK_UP;
+			}
+		} else
+			bnx2x_umac_enable(params, vars, 0);
+		bnx2x_set_led(params, vars,
+			      LED_MODE_OPER, vars->line_speed);
+	}
+	if ((CHIP_IS_E1x(bp) ||
+	     CHIP_IS_E2(bp))) {
+		if (link_10g) {
+			if (bnx2x_bmac_enable(params, vars, 0) ==
+			    -ESRCH) {
+				DP(NETIF_MSG_LINK, "Found errors on BMAC\n");
+				vars->link_up = 0;
+				vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+				vars->link_status &= ~LINK_STATUS_LINK_UP;
+			}
+
+			bnx2x_set_led(params, vars,
+				      LED_MODE_OPER, SPEED_10000);
+		} else {
+			rc = bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+
+			/* AN complete? */
+			if ((vars->link_status &
+			     LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+			    && (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
+			    SINGLE_MEDIA_DIRECT(params))
+				bnx2x_set_gmii_tx_driver(params);
+		}
+	}
+
+	/* PBF - link up */
+	if (CHIP_IS_E1x(bp))
+		rc |= bnx2x_pbf_update(params, vars->flow_ctrl,
+				       vars->line_speed);
+
+	/* disable drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
+
+	/* update shared memory */
+	bnx2x_update_mng(params, vars->link_status);
+	msleep(20);
+	return rc;
+}
+
+/*
+ * The bnx2x_link_update function should be called upon link
+ * interrupt.
+ * Link is considered up as follows:
+ * - DIRECT_SINGLE_MEDIA - Only XGXS link (internal link) needs
+ *   to be up
+ * - SINGLE_MEDIA - The link between the 577xx and the external
+ *   phy (XGXS) needs to be up as well as the external link of
+ *   the phy (PHY_EXT1)
+ * - DUAL_MEDIA - The link between the 577xx and the first
+ *   external phy needs to be up, and at least one of the two
+ *   external phy links must be up.
+ */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	struct link_vars phy_vars[MAX_PHYS];
+	u8 port = params->port;
+	u8 link_10g_plus, phy_index;
+	u8 ext_phy_link_up = 0, cur_link_up;
+	int rc = 0;
+	u8 is_mi_int = 0;
+	u16 ext_phy_line_speed = 0, prev_line_speed = vars->line_speed;
+	u8 active_external_phy = INT_PHY;
+	vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+	for (phy_index = INT_PHY; phy_index < params->num_phys;
+	      phy_index++) {
+		phy_vars[phy_index].flow_ctrl = 0;
+		phy_vars[phy_index].link_status = 0;
+		phy_vars[phy_index].line_speed = 0;
+		phy_vars[phy_index].duplex = DUPLEX_FULL;
+		phy_vars[phy_index].phy_link_up = 0;
+		phy_vars[phy_index].link_up = 0;
+		phy_vars[phy_index].fault_detected = 0;
+	}
+
+	if (USES_WARPCORE(bp))
+		bnx2x_set_aer_mmd(params, &params->phy[INT_PHY]);
+
+	DP(NETIF_MSG_LINK, "port %x, XGXS?%x, int_status 0x%x\n",
+		 port, (vars->phy_flags & PHY_XGXS_FLAG),
+		 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4));
+
+	is_mi_int = (u8)(REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT +
+				port*0x18) > 0);
+	DP(NETIF_MSG_LINK, "int_mask 0x%x MI_INT %x, SERDES_LINK %x\n",
+		 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
+		 is_mi_int,
+		 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c));
+
+	DP(NETIF_MSG_LINK, " 10G %x, XGXS_LINK %x\n",
+	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
+	  REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68));
+
+	/* disable emac */
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+	/*
+	 * Step 1:
+	 * Check external link change only for external phys, and apply
+	 * priority selection between them in case the link on both phys
+	 * is up. Note that instead of the common vars, a temporary
+	 * vars argument is used since each phy may have different link/
+	 * speed/duplex result
+	 */
+	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+	      phy_index++) {
+		struct bnx2x_phy *phy = &params->phy[phy_index];
+		if (!phy->read_status)
+			continue;
+		/* Read link status and params of this ext phy */
+		cur_link_up = phy->read_status(phy, params,
+					       &phy_vars[phy_index]);
+		if (cur_link_up) {
+			DP(NETIF_MSG_LINK, "phy in index %d link is up\n",
+				   phy_index);
+		} else {
+			DP(NETIF_MSG_LINK, "phy in index %d link is down\n",
+				   phy_index);
+			continue;
+		}
+
+		if (!ext_phy_link_up) {
+			ext_phy_link_up = 1;
+			active_external_phy = phy_index;
+		} else {
+			switch (bnx2x_phy_selection(params)) {
+			case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+			case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+			/*
+			 * In this option, the first PHY makes sure to pass the
+			 * traffic through itself only.
+			 * It's not clear how to reset the link on the second
+			 * phy.
+			 */
+				active_external_phy = EXT_PHY1;
+				break;
+			case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+			/*
+			 * In this option, the first PHY makes sure to pass the
+			 * traffic through the second PHY.
+			 */
+				active_external_phy = EXT_PHY2;
+				break;
+			default:
+			/*
+			 * Link indication on both PHYs with the following cases
+			 * is invalid:
+			 * - FIRST_PHY means that second phy wasn't initialized,
+			 * hence its link is expected to be down
+			 * - SECOND_PHY means that first phy should not be able
+			 * to link up by itself (using configuration)
+			 * - DEFAULT should be overridden during initialization
+			 */
+				DP(NETIF_MSG_LINK, "Invalid link indication"
+					   "mpc=0x%x. DISABLING LINK !!!\n",
+					   params->multi_phy_config);
+				ext_phy_link_up = 0;
+				break;
+			}
+		}
+	}
+	prev_line_speed = vars->line_speed;
+	/*
+	 * Step 2:
+	 * Read the status of the internal phy. In case of
+	 * DIRECT_SINGLE_MEDIA board, this link is the external link,
+	 * otherwise this is the link between the 577xx and the first
+	 * external phy
+	 */
+	if (params->phy[INT_PHY].read_status)
+		params->phy[INT_PHY].read_status(
+			&params->phy[INT_PHY],
+			params, vars);
+	/*
+	 * The INT_PHY flow control resides in the vars. This includes the
+	 * case where the speed or flow control are not set to AUTO.
+	 * Otherwise, the active external phy flow control result is set
+	 * to the vars. The ext_phy_line_speed is needed to check if the
+	 * speed is different between the internal phy and external phy.
+	 * This case may be the result of an intermediate link speed change.
+	 */
+	if (active_external_phy > INT_PHY) {
+		vars->flow_ctrl = phy_vars[active_external_phy].flow_ctrl;
+		/*
+		 * Link speed is taken from the XGXS. AN and FC result from
+		 * the external phy.
+		 */
+		vars->link_status |= phy_vars[active_external_phy].link_status;
+
+		/*
+		 * If active_external_phy is the first PHY and link is up,
+		 * disable TX on the second external PHY
+		 */
+		if (active_external_phy == EXT_PHY1) {
+			if (params->phy[EXT_PHY2].phy_specific_func) {
+				DP(NETIF_MSG_LINK, "Disabling TX on"
+						   " EXT_PHY2\n");
+				params->phy[EXT_PHY2].phy_specific_func(
+					&params->phy[EXT_PHY2],
+					params, DISABLE_TX);
+			}
+		}
+
+		ext_phy_line_speed = phy_vars[active_external_phy].line_speed;
+		vars->duplex = phy_vars[active_external_phy].duplex;
+		if (params->phy[active_external_phy].supported &
+		    SUPPORTED_FIBRE)
+			vars->link_status |= LINK_STATUS_SERDES_LINK;
+		else
+			vars->link_status &= ~LINK_STATUS_SERDES_LINK;
+		DP(NETIF_MSG_LINK, "Active external phy selected: %x\n",
+			   active_external_phy);
+	}
+
+	for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+	      phy_index++) {
+		if (params->phy[phy_index].flags &
+		    FLAGS_REARM_LATCH_SIGNAL) {
+			bnx2x_rearm_latch_signal(bp, port,
+						 phy_index ==
+						 active_external_phy);
+			break;
+		}
+	}
+	DP(NETIF_MSG_LINK, "vars->flow_ctrl = 0x%x, vars->link_status = 0x%x,"
+		   " ext_phy_line_speed = %d\n", vars->flow_ctrl,
+		   vars->link_status, ext_phy_line_speed);
+	/*
+	 * Upon link speed change, set the NIG into drain mode. This deals
+	 * with a possible FIFO glitch due to the clock change when speed
+	 * is decreased without a link-down indication
+	 */
+
+	if (vars->phy_link_up) {
+		if (!(SINGLE_MEDIA_DIRECT(params)) && ext_phy_link_up &&
+		    (ext_phy_line_speed != vars->line_speed)) {
+			DP(NETIF_MSG_LINK, "Internal link speed %d is"
+				   " different than the external"
+				   " link speed %d\n", vars->line_speed,
+				   ext_phy_line_speed);
+			vars->phy_link_up = 0;
+		} else if (prev_line_speed != vars->line_speed) {
+			REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4,
+			       0);
+			msleep(1);
+		}
+	}
+
+	/* anything 10G and over uses the bmac */
+	link_10g_plus = (vars->line_speed >= SPEED_10000);
+
+	bnx2x_link_int_ack(params, vars, link_10g_plus);
+
+	/*
+	 * In case the external phy link is up and the internal link is
+	 * down (probably not yet initialized after link initialization),
+	 * the internal phy needs to be initialized.
+	 * Note that after a link down-up as a result of cable plug, the
+	 * xgxs link would probably become up again without the need to
+	 * initialize it
+	 */
+	if (!(SINGLE_MEDIA_DIRECT(params))) {
+		DP(NETIF_MSG_LINK, "ext_phy_link_up = %d, int_link_up = %d,"
+			   " init_preceding = %d\n", ext_phy_link_up,
+			   vars->phy_link_up,
+			   params->phy[EXT_PHY1].flags &
+			   FLAGS_INIT_XGXS_FIRST);
+		if (!(params->phy[EXT_PHY1].flags &
+		      FLAGS_INIT_XGXS_FIRST)
+		    && ext_phy_link_up && !vars->phy_link_up) {
+			vars->line_speed = ext_phy_line_speed;
+			if (vars->line_speed < SPEED_1000)
+				vars->phy_flags |= PHY_SGMII_FLAG;
+			else
+				vars->phy_flags &= ~PHY_SGMII_FLAG;
+
+			if (params->phy[INT_PHY].config_init)
+				params->phy[INT_PHY].config_init(
+					&params->phy[INT_PHY], params,
+						vars);
+		}
+	}
+	/*
+	 * Link is up only if both local phy and external phy (in case of
+	 * non-direct board) are up and no fault detected on active PHY.
+	 */
+	vars->link_up = (vars->phy_link_up &&
+			 (ext_phy_link_up ||
+			  SINGLE_MEDIA_DIRECT(params)) &&
+			 (phy_vars[active_external_phy].fault_detected == 0));
+
+	if (vars->link_up)
+		rc = bnx2x_update_link_up(params, vars, link_10g_plus);
+	else
+		rc = bnx2x_update_link_down(params, vars);
+
+	return rc;
+}
+
+/*****************************************************************************/
+/*			    External Phy section			     */
+/*****************************************************************************/
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port)
+{
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+	msleep(1);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, port);
+}
+
+static void bnx2x_save_spirom_version(struct bnx2x *bp, u8 port,
+				      u32 spirom_ver, u32 ver_addr)
+{
+	DP(NETIF_MSG_LINK, "FW version 0x%x:0x%x for port %d\n",
+		 (u16)(spirom_ver>>16), (u16)spirom_ver, port);
+
+	if (ver_addr)
+		REG_WR(bp, ver_addr, spirom_ver);
+}
+
+static void bnx2x_save_bcm_spirom_ver(struct bnx2x *bp,
+				      struct bnx2x_phy *phy,
+				      u8 port)
+{
+	u16 fw_ver1, fw_ver2;
+
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2, &fw_ver2);
+	bnx2x_save_spirom_version(bp, port, (u32)(fw_ver1<<16 | fw_ver2),
+				  phy->ver_addr);
+}
+
+static void bnx2x_ext_phy_10G_an_resolve(struct bnx2x *bp,
+				       struct bnx2x_phy *phy,
+				       struct link_vars *vars)
+{
+	u16 val;
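+	/*
+	 * The AN status register is latched; read it twice so the second
+	 * read reflects the current state (clear-on-read assumed).
+	 */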
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD,
+			MDIO_AN_REG_STATUS, &val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD,
+			MDIO_AN_REG_STATUS, &val);
+	if (val & (1<<5))
+		vars->link_status |= LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+	if ((val & (1<<0)) == 0)
+		vars->link_status |= LINK_STATUS_PARALLEL_DETECTION_USED;
+}
+
+/******************************************************************/
+/*		common BCM8073/BCM8727 PHY SECTION		  */
+/******************************************************************/
+static void bnx2x_8073_resolve_fc(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	if (phy->req_line_speed == SPEED_10 ||
+	    phy->req_line_speed == SPEED_100) {
+		vars->flow_ctrl = phy->req_flow_ctrl;
+		return;
+	}
+
+	if (bnx2x_ext_phy_resolve_fc(phy, params, vars) &&
+	    (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE)) {
+		u16 pause_result;
+		u16 ld_pause;		/* local */
+		u16 lp_pause;		/* link partner */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_CL37_FC_LD, &ld_pause);
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_CL37_FC_LP, &lp_pause);
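+		/*
+		 * Fold the local and partner CL37 pause bits into a single
+		 * code word (local in the upper bits, partner in the lower)
+		 * for bnx2x_pause_resolve().
+		 */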
+		pause_result = (ld_pause &
+				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 5;
+		pause_result |= (lp_pause &
+				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
+
+		bnx2x_pause_resolve(vars, pause_result);
+		DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
+			   pause_result);
+	}
+}
+
+static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp,
+					     struct bnx2x_phy *phy,
+					     u8 port)
+{
+	u32 count = 0;
+	u16 fw_ver1, fw_msgout;
+	int rc = 0;
+
+	/* Boot port from external ROM */
+	/* EDC grst */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 0x0001);
+
+	/* ucode reboot and rst */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 0x008c);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+	/* Reset internal microprocessor */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+	/* Release srst bit */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+	/* Delay 100ms per the PHY specifications */
+	msleep(100);
+
+	/* The 8073 sometimes takes longer to download */
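+	/*
+	 * Poll until a valid version shows up: 0 and 0x4321 mean the
+	 * download is still in progress, and the 8073 additionally signals
+	 * readiness with 0x03 in the M8051 message-out register (magic
+	 * values taken from the loop's exit condition).
+	 */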
+	do {
+		count++;
+		if (count > 300) {
+			DP(NETIF_MSG_LINK,
+				 "bnx2x_8073_8727_external_rom_boot port %x:"
+				 "Download failed. fw version = 0x%x\n",
+				 port, fw_ver1);
+			rc = -EINVAL;
+			break;
+		}
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_ROM_VER1, &fw_ver1);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout);
+
+		msleep(1);
+	} while (fw_ver1 == 0 || fw_ver1 == 0x4321 ||
+			((fw_msgout & 0xff) != 0x03 && (phy->type ==
+			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073)));
+
+	/* Clear ser_boot_ctl bit */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+	bnx2x_save_bcm_spirom_ver(bp, phy, port);
+
+	DP(NETIF_MSG_LINK,
+		 "bnx2x_8073_8727_external_rom_boot port %x:"
+		 "Download complete. fw version = 0x%x\n",
+		 port, fw_ver1);
+
+	return rc;
+}
+
+/******************************************************************/
+/*			BCM8073 PHY SECTION			  */
+/******************************************************************/
+static int bnx2x_8073_is_snr_needed(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* This is required only for 8073 A1, firmware version 0x102 */
+	u16 val;
+
+	/* Read 8073 HW revision */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+	if (val != 1) {
+		/* No need to workaround in 8073 A1 */
+		return 0;
+	}
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2, &val);
+
+	/* SNR should be applied only for version 0x102 */
+	if (val != 0x102)
+		return 0;
+
+	return 1;
+}
+
+static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt, cnt1;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_CHIP_REV, &val);
+
+	if (val > 0) {
+		/* No need to workaround in 8073 A1 */
+		return 0;
+	}
+	/* XAUI workaround in 8073 A0: */
+
+	/*
+	 * After loading the boot ROM and restarting Autoneg, poll
+	 * Dev1, Reg $C820:
+	 */
+
+	for (cnt = 0; cnt < 1000; cnt++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+				&val);
+		/*
+		 * If bit [14] = 0 or bit [13] = 0, continue on with
+		 * system initialization (XAUI work-around not required, as
+		 * these bits indicate 2.5G or 1G link up).
+		 */
+		if (!(val & (1<<14)) || !(val & (1<<13))) {
+			DP(NETIF_MSG_LINK, "XAUI work-around not required\n");
+			return 0;
+		} else if (!(val & (1<<15))) {
+			DP(NETIF_MSG_LINK, "bit 15 went off\n");
+			/*
+			 * If bit 15 is 0, then poll Dev1, Reg $C841 until it's
+			 * MSB (bit15) goes to 1 (indicating that the XAUI
+			 * workaround has completed), then continue on with
+			 * system initialization.
+			 */
+			for (cnt1 = 0; cnt1 < 1000; cnt1++) {
+				bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8073_XAUI_WA, &val);
+				if (val & (1<<15)) {
+					DP(NETIF_MSG_LINK,
+					  "XAUI workaround has completed\n");
+					return 0;
+				}
+				msleep(3);
+			}
+			break;
+		}
+		msleep(3);
+	}
+	DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n");
+	return -EINVAL;
+}
+
+static void bnx2x_807x_force_10G(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	/* Force KR or KX */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0x000b);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0000);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+}
+
+static void bnx2x_8073_set_pause_cl37(struct link_params *params,
+				      struct bnx2x_phy *phy,
+				      struct link_vars *vars)
+{
+	u16 cl37_val;
+	struct bnx2x *bp = params->bp;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &cl37_val);
+
+	cl37_val &= ~MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC) {
+		cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) {
+		cl37_val |=  MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
+	}
+	if ((vars->ieee_fc &
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+	    MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) {
+		cl37_val |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
+	}
+	DP(NETIF_MSG_LINK,
+		 "Ext phy AN advertize cl37 0x%x\n", cl37_val);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, cl37_val);
+	msleep(500);
+}
+
+static int bnx2x_8073_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0, tmp1;
+	u8 gpio_port;
+	DP(NETIF_MSG_LINK, "Init 8073\n");
+
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port);
+
+	/* enable LASI */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2));
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,  0x0004);
+
+	bnx2x_8073_set_pause_cl37(params, phy, vars);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
+
+	/* Swap polarity if required - Must be done only in non-1G mode */
+	if (params->lane_config & PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+		/* Configure the 8073 to swap _P and _N of the KR lines */
+		DP(NETIF_MSG_LINK, "Swapping polarity for the 8073\n");
+		/* 10G Rx/Tx and 1G Tx signal polarity swap */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL,
+				 (val | (3<<9)));
+	}
+
+	/* Enable CL37 BAM */
+	if (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_hw_config[params->port].default_cfg)) &
+	    PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8073_BAM, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8073_BAM, val | 1);
+		DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+	}
+	if (params->loopback_mode == LOOPBACK_EXT) {
+		bnx2x_807x_force_10G(bp, phy);
+		DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
+		return 0;
+	} else {
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_BCM_CTRL, 0x0002);
+	}
+	if (phy->req_line_speed != SPEED_AUTO_NEG) {
+		if (phy->req_line_speed == SPEED_10000) {
+			val = (1<<7);
+		} else if (phy->req_line_speed ==  SPEED_2500) {
+			val = (1<<5);
+			/*
+			 * Note that 2.5G works only when used with 1G
+			 * advertisement
+			 */
+		} else
+			val = (1<<5);
+	} else {
+		val = 0;
+		if (phy->speed_cap_mask &
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+			val |= (1<<7);
+
+		/* Note that 2.5G works only when used with 1G advertisement */
+		if (phy->speed_cap_mask &
+			(PORT_HW_CFG_SPEED_CAPABILITY_D0_1G |
+			 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+			val |= (1<<5);
+		DP(NETIF_MSG_LINK, "807x autoneg val = 0x%x\n", val);
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV, val);
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, &tmp1);
+
+	if (((phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) &&
+	     (phy->req_line_speed == SPEED_AUTO_NEG)) ||
+	    (phy->req_line_speed == SPEED_2500)) {
+		u16 phy_ver;
+		/* Allow 2.5G for A1 and above */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_CHIP_REV,
+				&phy_ver);
+		DP(NETIF_MSG_LINK, "Add 2.5G\n");
+		if (phy_ver > 0)
+			tmp1 |= 1;
+		else
+			tmp1 &= 0xfffe;
+	} else {
+		DP(NETIF_MSG_LINK, "Disable 2.5G\n");
+		tmp1 &= 0xfffe;
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_8073_2_5G, tmp1);
+	/* Add support for CL37 (passive mode) II */
+
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, &tmp1);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD,
+			 (tmp1 | ((phy->req_duplex == DUPLEX_FULL) ?
+				  0x20 : 0x40)));
+
+	/* Add support for CL37 (passive mode) III */
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+
+	/*
+	 * The SNR will improve about 2dB by changing the BW and FFE main
+	 * tap. The remaining commands are executed after link is up.
+	 * Change FFE main cursor to 5 in EDC register
+	 */
+	if (bnx2x_8073_is_snr_needed(bp, phy))
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_EDC_FFE_MAIN,
+				 0xFB0C);
+
+	/* Enable FEC (Forward Error Correction) Request in the AN */
+	bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, &tmp1);
+	tmp1 |= (1<<15);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV2, tmp1);
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+
+	/* Restart autoneg */
+	msleep(500);
+	bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+	DP(NETIF_MSG_LINK, "807x Autoneg Restart: Advertise 1G=%x, 10G=%x\n",
+		   ((val & (1<<5)) > 0), ((val & (1<<7)) > 0));
+	return 0;
+}
+
+static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up = 0;
+	u16 val1, val2;
+	u16 link_status = 0;
+	u16 an1000_status = 0;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+	DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1);
+
+	/* clear the interrupt LASI status register */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "807x PCS status 0x%x->0x%x\n", val2, val1);
+	/* Clear MSG-OUT */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+	/* Check the LASI */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+	DP(NETIF_MSG_LINK, "KR 0x9003 0x%x\n", val2);
+
+	/* Check the link status */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2);
+	DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	link_up = ((val1 & 4) == 4);
+	DP(NETIF_MSG_LINK, "PMA_REG_STATUS=0x%x\n", val1);
+
+	if (link_up &&
+	     ((phy->req_line_speed != SPEED_10000))) {
+		if (bnx2x_8073_xaui_wa(bp, phy) != 0)
+			return 0;
+	}
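+	/* Read the AN link status twice to clear any stale latched value */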
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &an1000_status);
+
+	/* Check the link status on 1.1.2 */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "KR PMA status 0x%x->0x%x,"
+		   "an_link_status=0x%x\n", val2, val1, an1000_status);
+
+	link_up = (((val1 & 4) == 4) || (an1000_status & (1<<1)));
+	if (link_up && bnx2x_8073_is_snr_needed(bp, phy)) {
+		/*
+		 * The SNR will improve about 2dB by changing the BW and FFE
+		 * main tap. The 1st write to change the FFE main tap is done
+		 * before restarting AN. Change PLL Bandwidth in EDC register
+		 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_PLL_BANDWIDTH,
+				 0x26BC);
+
+		/* Change CDR Bandwidth in EDC register */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CDR_BANDWIDTH,
+				 0x0333);
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8073_SPEED_LINK_STATUS,
+			&link_status);
+
+	/* Bits 0..2 --> speed detected, bits 13..15 --> link is down */
+	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+			   params->port);
+	} else if ((link_status & (1<<1)) && (!(link_status & (1<<14)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_2500;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 2.5G\n",
+			   params->port);
+	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_1000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+			   params->port);
+	} else {
+		link_up = 0;
+		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+			   params->port);
+	}
+
+	if (link_up) {
+		/* Swap polarity if required */
+		if (params->lane_config &
+		    PORT_HW_CFG_SWAP_PHY_POLARITY_ENABLED) {
+			/* Configure the 8073 to swap P and N of the KR lines */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_XS_DEVAD,
+					MDIO_XS_REG_8073_RX_CTRL_PCIE, &val1);
+			/*
+			 * Set bit 3 to invert Rx in 1G mode and clear this bit
+			 * when it is in 10G mode.
+			 */
+			if (vars->line_speed == SPEED_1000) {
+				DP(NETIF_MSG_LINK, "Swapping 1G polarity for"
+					      "the 8073\n");
+				val1 |= (1<<3);
+			} else
+				val1 &= ~(1<<3);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_XS_DEVAD,
+					 MDIO_XS_REG_8073_RX_CTRL_PCIE,
+					 val1);
+		}
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+		bnx2x_8073_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+	}
+	return link_up;
+}
+
+static void bnx2x_8073_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 gpio_port;
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	DP(NETIF_MSG_LINK, "Setting 8073 port %d into low power mode\n",
+	   gpio_port);
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       gpio_port);
+}
+
+/******************************************************************/
+/*			BCM8705 PHY SECTION			  */
+/******************************************************************/
+static int bnx2x_8705_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "init 8705\n");
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_MISC_CTRL, 0x8288);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, 0x7fbf);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CMU_PLL_BYPASS, 0x0100);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_CNTL, 0x1);
+	/* BCM8705 doesn't have microcode, hence the 0 */
+	bnx2x_save_spirom_version(bp, params->port, params->shmem_base, 0);
+	return 0;
+}
+
+static u8 bnx2x_8705_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	u8 link_up = 0;
+	u16 val1, rx_sd;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "read status 8705\n");
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_WIS_DEVAD, MDIO_WIS_REG_LASI_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);
+
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_PMA_DEVAD, 0xc809, &val1);
+	bnx2x_cl45_read(bp, phy,
+		      MDIO_PMA_DEVAD, 0xc809, &val1);
+
+	DP(NETIF_MSG_LINK, "8705 1.c809 val=0x%x\n", val1);
+	link_up = ((rx_sd & 0x1) && (val1 & (1<<9)) && ((val1 & (1<<8)) == 0));
+	if (link_up) {
+		vars->line_speed = SPEED_10000;
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
+
+/******************************************************************/
+/*			SFP+ module Section			  */
+/******************************************************************/
+static void bnx2x_set_disable_pmd_transmit(struct link_params *params,
+					   struct bnx2x_phy *phy,
+					   u8 pmd_dis)
+{
+	struct bnx2x *bp = params->bp;
+	/*
+	 * Disable transmitter only for bootcodes which can enable it afterwards
+	 * (for D3 link)
+	 */
+	if (pmd_dis) {
+		if (params->feature_config_flags &
+		     FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED)
+			DP(NETIF_MSG_LINK, "Disabling PMD transmitter\n");
+		else {
+			DP(NETIF_MSG_LINK, "NOT disabling PMD transmitter\n");
+			return;
+		}
+	} else
+		DP(NETIF_MSG_LINK, "Enabling PMD transmitter\n");
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_TX_DISABLE, pmd_dis);
+}
+
+static u8 bnx2x_get_gpio_port(struct link_params *params)
+{
+	u8 gpio_port;
+	u32 swap_val, swap_override;
+	struct bnx2x *bp = params->bp;
+	if (CHIP_IS_E2(bp))
+		gpio_port = BP_PATH(bp);
+	else
+		gpio_port = params->port;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
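+	/*
+	 * Flip the GPIO port when both the port-swap strap and its
+	 * override are set.
+	 */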
+	return gpio_port ^ (swap_val && swap_override);
+}
+
+static void bnx2x_sfp_e1e2_set_transmitter(struct link_params *params,
+					   struct bnx2x_phy *phy,
+					   u8 tx_en)
+{
+	u16 val;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	u32 tx_en_mode;
+
+	/* Disable/Enable transmitter (TX laser of the SFP+ module) */
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				     dev_info.port_hw_config[port].sfp_ctrl)) &
+		PORT_HW_CFG_TX_LASER_MASK;
+	DP(NETIF_MSG_LINK, "Setting transmitter tx_en=%x for port %x "
+			   "mode = %x\n", tx_en, port, tx_en_mode);
+	switch (tx_en_mode) {
+	case PORT_HW_CFG_TX_LASER_MDIO:
+
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val);
+
+		if (tx_en)
+			val &= ~(1<<15);
+		else
+			val |= (1<<15);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER,
+				 val);
+	break;
+	case PORT_HW_CFG_TX_LASER_GPIO0:
+	case PORT_HW_CFG_TX_LASER_GPIO1:
+	case PORT_HW_CFG_TX_LASER_GPIO2:
+	case PORT_HW_CFG_TX_LASER_GPIO3:
+	{
+		u16 gpio_pin;
+		u8 gpio_port, gpio_mode;
+		if (tx_en)
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_HIGH;
+		else
+			gpio_mode = MISC_REGISTERS_GPIO_OUTPUT_LOW;
+
+		gpio_pin = tx_en_mode - PORT_HW_CFG_TX_LASER_GPIO0;
+		gpio_port = bnx2x_get_gpio_port(params);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+		break;
+	}
+	default:
+		DP(NETIF_MSG_LINK, "Invalid TX_LASER_MDIO 0x%x\n", tx_en_mode);
+		break;
+	}
+}
+
+static void bnx2x_sfp_set_transmitter(struct link_params *params,
+				      struct bnx2x_phy *phy,
+				      u8 tx_en)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ transmitter to %d\n", tx_en);
+	if (CHIP_IS_E3(bp))
+		bnx2x_sfp_e3_set_transmitter(params, phy, tx_en);
+	else
+		bnx2x_sfp_e1e2_set_transmitter(params, phy, tx_en);
+}
+
+static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val = 0;
+	u16 i;
+	if (byte_cnt > 16) {
+		DP(NETIF_MSG_LINK, "Reading from eeprom is"
+			    " is limited to 0xf\n");
+		return -EINVAL;
+	}
+	/* Set the read command byte count */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+			 (byte_cnt | 0xa000));
+
+	/* Set the read command address */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+			 addr);
+
+	/* Activate read command */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			 0x2c0f);
+
+	/* Wait up to 500us for command complete status */
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+			break;
+		udelay(5);
+	}
+
+	if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+		DP(NETIF_MSG_LINK,
+			 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+			 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+		return -EINVAL;
+	}
+
+	/* Read the buffer */
+	for (i = 0; i < byte_cnt; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF + i, &val);
+		o_buf[i] = (u8)(val & MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK);
+	}
+
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+			return 0;
+		msleep(1);
+	}
+	return -EINVAL;
+}
+
+static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+						 struct link_params *params,
+						 u16 addr, u8 byte_cnt,
+						 u8 *o_buf)
+{
+	int rc = 0;
+	u8 i, j = 0, cnt = 0;
+	u32 data_array[4];
+	u16 addr32;
+	struct bnx2x *bp = params->bp;
+	/*DP(NETIF_MSG_LINK, "bnx2x_direct_read_sfp_module_eeprom:"
+					" addr %d, cnt %d\n",
+					addr, byte_cnt);*/
+	if (byte_cnt > 16) {
+		DP(NETIF_MSG_LINK, "Reading from eeprom is"
+			    " is limited to 16 bytes\n");
+		return -EINVAL;
+	}
+
+	/* 4 byte aligned address */
+	addr32 = addr & (~0x3);
+	do {
+		rc = bnx2x_bsc_read(params, phy, 0xa0, addr32, 0, byte_cnt,
+				    data_array);
+	} while ((rc != 0) && (++cnt < I2C_WA_RETRY_CNT));
+
+	if (rc == 0) {
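+		/*
+		 * Copy out only the requested bytes, skipping the bytes that
+		 * were read just for the sake of 4-byte alignment.
+		 */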
+		for (i = (addr - addr32); i < byte_cnt + (addr - addr32); i++) {
+			o_buf[j] = *((u8 *)data_array + i);
+			j++;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+					     struct link_params *params,
+					     u16 addr, u8 byte_cnt, u8 *o_buf)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val, i;
+
+	if (byte_cnt > 16) {
+		DP(NETIF_MSG_LINK, "Reading from eeprom is"
+			    " is limited to 0xf\n");
+		return -EINVAL;
+	}
+
+	/* Need to read from 1.8000 to clear it */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			&val);
+
+	/* Set the read command byte count */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT,
+			 ((byte_cnt < 2) ? 2 : byte_cnt));
+
+	/* Set the read command address */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR,
+			 addr);
+	/* Set the destination address */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 0x8004,
+			 MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF);
+
+	/* Activate read command */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL,
+			 0x8002);
+	/*
+	 * Wait appropriate time for two-wire command to finish before
+	 * polling the status register
+	 */
+	msleep(1);
+
+	/* Wait up to 500us for command complete status */
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE)
+			break;
+		udelay(5);
+	}
+
+	if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) !=
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE) {
+		DP(NETIF_MSG_LINK,
+			 "Got bad status 0x%x when reading from SFP+ EEPROM\n",
+			 (val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK));
+		return -EFAULT;
+	}
+
+	/* Read the buffer */
+	for (i = 0; i < byte_cnt; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF + i, &val);
+		o_buf[i] = (u8)(val & MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK);
+	}
+
+	for (i = 0; i < 100; i++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
+		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
+		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
+			return 0;
+		msleep(1);
+	}
+
+	return -EINVAL;
+}
+
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				 struct link_params *params, u16 addr,
+				 u8 byte_cnt, u8 *o_buf)
+{
+	int rc = -EINVAL;
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr,
+						       byte_cnt, o_buf);
+	break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		rc = bnx2x_8727_read_sfp_module_eeprom(phy, params, addr,
+						       byte_cnt, o_buf);
+	break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		rc = bnx2x_warpcore_read_sfp_module_eeprom(phy, params, addr,
+							   byte_cnt, o_buf);
+	break;
+	}
+	return rc;
+}
+
+static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
+			      struct link_params *params,
+			      u16 *edc_mode)
+{
+	struct bnx2x *bp = params->bp;
+	u32 sync_offset = 0, phy_idx, media_types;
+	u8 val, check_limiting_mode = 0;
+	*edc_mode = EDC_MODE_LIMITING;
+
+	phy->media_type = ETH_PHY_UNSPECIFIED;
+	/* First check for copper cable */
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 SFP_EEPROM_CON_TYPE_ADDR,
+					 1,
+					 &val) != 0) {
+		DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n");
+		return -EINVAL;
+	}
+
+	switch (val) {
+	case SFP_EEPROM_CON_TYPE_VAL_COPPER:
+	{
+		u8 copper_module_type;
+		phy->media_type = ETH_PHY_DA_TWINAX;
+		/*
+		 * Check if it is an active cable (includes SFP+ module)
+		 * or a passive cable
+		 */
+		if (bnx2x_read_sfp_module_eeprom(phy,
+					       params,
+					       SFP_EEPROM_FC_TX_TECH_ADDR,
+					       1,
+					       &copper_module_type) != 0) {
+			DP(NETIF_MSG_LINK,
+				"Failed to read copper-cable-type"
+				" from SFP+ EEPROM\n");
+			return -EINVAL;
+		}
+
+		if (copper_module_type &
+		    SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) {
+			DP(NETIF_MSG_LINK, "Active Copper cable detected\n");
+			check_limiting_mode = 1;
+		} else if (copper_module_type &
+			SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+				DP(NETIF_MSG_LINK, "Passive Copper"
+					    " cable detected\n");
+				*edc_mode =
+				      EDC_MODE_PASSIVE_DAC;
+		} else {
+			DP(NETIF_MSG_LINK, "Unknown copper-cable-"
+				     "type 0x%x !!!\n", copper_module_type);
+			return -EINVAL;
+		}
+		break;
+	}
+	case SFP_EEPROM_CON_TYPE_VAL_LC:
+		phy->media_type = ETH_PHY_SFP_FIBER;
+		DP(NETIF_MSG_LINK, "Optic module detected\n");
+		check_limiting_mode = 1;
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n",
+			 val);
+		return -EINVAL;
+	}
+	sync_offset = params->shmem_base +
+		offsetof(struct shmem_region,
+			 dev_info.port_hw_config[params->port].media_type);
+	media_types = REG_RD(bp, sync_offset);
+	/* Update media type for non-PMF sync */
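+	/*
+	 * Each PHY has its own media-type field in this register;
+	 * PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT is the per-PHY stride.
+	 */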
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (&(params->phy[phy_idx]) == phy) {
+			media_types &= ~(PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+			media_types |= ((phy->media_type &
+					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT * phy_idx));
+			break;
+		}
+	}
+	REG_WR(bp, sync_offset, media_types);
+	if (check_limiting_mode) {
+		u8 options[SFP_EEPROM_OPTIONS_SIZE];
+		if (bnx2x_read_sfp_module_eeprom(phy,
+						 params,
+						 SFP_EEPROM_OPTIONS_ADDR,
+						 SFP_EEPROM_OPTIONS_SIZE,
+						 options) != 0) {
+			DP(NETIF_MSG_LINK, "Failed to read Option"
+				" field from module EEPROM\n");
+			return -EINVAL;
+		}
+		if ((options[0] & SFP_EEPROM_OPTIONS_LINEAR_RX_OUT_MASK))
+			*edc_mode = EDC_MODE_LINEAR;
+		else
+			*edc_mode = EDC_MODE_LIMITING;
+	}
+	DP(NETIF_MSG_LINK, "EDC mode is set to 0x%x\n", *edc_mode);
+	return 0;
+}
+
+/*
+ * This function reads the relevant field from the module (SFP+) and verifies
+ * that it is compliant with this board
+ */
+static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy,
+				   struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u32 val, cmd;
+	u32 fw_resp, fw_cmd_param;
+	char vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE+1];
+	char vendor_pn[SFP_EEPROM_PART_NO_SIZE+1];
+	phy->flags &= ~FLAGS_SFP_NOT_APPROVED;
+	val = REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_feature_config[params->port].config));
+	if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_NO_ENFORCEMENT) {
+		DP(NETIF_MSG_LINK, "NOT enforcing module verification\n");
+		return 0;
+	}
+
+	if (params->feature_config_flags &
+	    FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY) {
+		/* Use specific phy request */
+		cmd = DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL;
+	} else if (params->feature_config_flags &
+		   FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY) {
+		/* Use first phy request only in case of non-dual media*/
+		if (DUAL_MEDIA(params)) {
+			DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
+			   "verification\n");
+			return -EINVAL;
+		}
+		cmd = DRV_MSG_CODE_VRFY_FIRST_PHY_OPT_MDL;
+	} else {
+		/* No support in OPT MDL detection */
+		DP(NETIF_MSG_LINK, "FW does not support OPT MDL "
+			  "verification\n");
+		return -EINVAL;
+	}
+
+	fw_cmd_param = FW_PARAM_SET(phy->addr, phy->type, phy->mdio_ctrl);
+	fw_resp = bnx2x_fw_command(bp, cmd, fw_cmd_param);
+	if (fw_resp == FW_MSG_CODE_VRFY_OPT_MDL_SUCCESS) {
+		DP(NETIF_MSG_LINK, "Approved module\n");
+		return 0;
+	}
+
+	/* format the warning message */
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 SFP_EEPROM_VENDOR_NAME_ADDR,
+					 SFP_EEPROM_VENDOR_NAME_SIZE,
+					 (u8 *)vendor_name))
+		vendor_name[0] = '\0';
+	else
+		vendor_name[SFP_EEPROM_VENDOR_NAME_SIZE] = '\0';
+	if (bnx2x_read_sfp_module_eeprom(phy,
+					 params,
+					 SFP_EEPROM_PART_NO_ADDR,
+					 SFP_EEPROM_PART_NO_SIZE,
+					 (u8 *)vendor_pn))
+		vendor_pn[0] = '\0';
+	else
+		vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
+
+	netdev_err(bp->dev,  "Warning: Unqualified SFP+ module detected,"
+			      " Port %d from %s part number %s\n",
+			 params->port, vendor_name, vendor_pn);
+	phy->flags |= FLAGS_SFP_NOT_APPROVED;
+	return -EINVAL;
+}
+
+static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy,
+						 struct link_params *params)
+{
+	u8 val;
+	struct bnx2x *bp = params->bp;
+	u16 timeout;
+	/*
+	 * Initialization time after hot-plug may take up to 300ms for
+	 * some PHY types (e.g. JDSU)
+	 */
+
+	for (timeout = 0; timeout < 60; timeout++) {
+		if (bnx2x_read_sfp_module_eeprom(phy, params, 1, 1, &val)
+		    == 0) {
+			DP(NETIF_MSG_LINK, "SFP+ module initialization "
+				     "took %d ms\n", timeout * 5);
+			return 0;
+		}
+		msleep(5);
+	}
+	return -EINVAL;
+}
+
+static void bnx2x_8727_power_module(struct bnx2x *bp,
+				    struct bnx2x_phy *phy,
+				    u8 is_power_up)
+{
+	/* Make sure GPIOs are not used for LED mode */
+	u16 val;
+	/*
+	 * In the GPIO register, bit 4 is used to determine if the GPIOs are
+	 * operating as INPUT or as OUTPUT. A value of 1 means input, and 0
+	 * means output.
+	 * Bits 0-1 determine the GPIOs value for OUTPUT in case bit 4 val is 0
+	 * Bits 8-9 determine the GPIOs value for INPUT in case bit 4 val is 1,
+	 * where the 1st bit is the over-current (input only), and the 2nd bit
+	 * is for power (output only).
+	 *
+	 * In case the NOC feature is disabled and power is up, set the GPIO
+	 * control as input to enable listening for the over-current indication
+	 */
+	if (phy->flags & FLAGS_NOC)
+		return;
+	if (is_power_up)
+		val = (1<<4);
+	else
+		/*
+		 * Set GPIO control to OUTPUT, and set the power bit
+		 * according to is_power_up
+		 */
+		val = (1<<1);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_GPIO_CTRL,
+			 val);
+}
+
+static int bnx2x_8726_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
+{
+	u16 cur_limiting_mode;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&cur_limiting_mode);
+	DP(NETIF_MSG_LINK, "Current Limiting mode is 0x%x\n",
+		 cur_limiting_mode);
+
+	if (edc_mode == EDC_MODE_LIMITING) {
+		DP(NETIF_MSG_LINK, "Setting LIMITING MODE\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_ROM_VER2,
+				 EDC_MODE_LIMITING);
+	} else { /* LRM mode (default) */
+
+		DP(NETIF_MSG_LINK, "Setting LRM MODE\n");
+
+		/*
+		 * Changing to LRM mode takes quite a few seconds, so do it
+		 * only if the current mode is limiting (the default is LRM)
+		 */
+		if (cur_limiting_mode != EDC_MODE_LIMITING)
+			return 0;
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_ROM_VER2,
+				 0x128);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_MISC_CTRL0,
+				 0x4008);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_LRM_MODE,
+				 0xaaaa);
+	}
+	return 0;
+}
+
+static int bnx2x_8727_set_limiting_mode(struct bnx2x *bp,
+					struct bnx2x_phy *phy,
+					u16 edc_mode)
+{
+	u16 phy_identifier;
+	u16 rom_ver2_val;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_PHY_IDENTIFIER,
+			&phy_identifier);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier & ~(1<<9)));
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_ROM_VER2,
+			&rom_ver2_val);
+	/* Keep the MSB 8-bits, and set the LSB 8-bits with the edc_mode */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_ROM_VER2,
+			 (rom_ver2_val & 0xff00) | (edc_mode & 0x00ff));
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_PHY_IDENTIFIER,
+			 (phy_identifier | (1<<9)));
+
+	return 0;
+}
+
+static void bnx2x_8727_specific_func(struct bnx2x_phy *phy,
+				     struct link_params *params,
+				     u32 action)
+{
+	struct bnx2x *bp = params->bp;
+
+	switch (action) {
+	case DISABLE_TX:
+		bnx2x_sfp_set_transmitter(params, phy, 0);
+		break;
+	case ENABLE_TX:
+		if (!(phy->flags & FLAGS_SFP_NOT_APPROVED))
+			bnx2x_sfp_set_transmitter(params, phy, 1);
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Function 0x%x not supported by 8727\n",
+		   action);
+		return;
+	}
+}
+
+static void bnx2x_set_e1e2_module_fault_led(struct link_params *params,
+					   u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+
+	u32 fault_led_gpio = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].sfp_ctrl)) &
+		PORT_HW_CFG_FAULT_MODULE_LED_MASK;
+	switch (fault_led_gpio) {
+	case PORT_HW_CFG_FAULT_MODULE_LED_DISABLED:
+		return;
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO0:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO1:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO2:
+	case PORT_HW_CFG_FAULT_MODULE_LED_GPIO3:
+	{
+		u8 gpio_port = bnx2x_get_gpio_port(params);
+		u16 gpio_pin = fault_led_gpio -
+			PORT_HW_CFG_FAULT_MODULE_LED_GPIO0;
+		DP(NETIF_MSG_LINK, "Set fault module-detected led "
+				   "pin %x port %x mode %x\n",
+			       gpio_pin, gpio_port, gpio_mode);
+		bnx2x_set_gpio(bp, gpio_pin, gpio_mode, gpio_port);
+	}
+	break;
+	default:
+		DP(NETIF_MSG_LINK, "Error: Invalid fault led mode 0x%x\n",
+			       fault_led_gpio);
+	}
+}
+
+static void bnx2x_set_e3_module_fault_led(struct link_params *params,
+					  u8 gpio_mode)
+{
+	u32 pin_cfg;
+	u8 port = params->port;
+	struct bnx2x *bp = params->bp;
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region,
+				  dev_info.port_hw_config[port].e3_sfp_ctrl)) &
+		PORT_HW_CFG_E3_FAULT_MDL_LED_MASK) >>
+		PORT_HW_CFG_E3_FAULT_MDL_LED_SHIFT;
+	DP(NETIF_MSG_LINK, "Setting Fault LED to %d using pin cfg %d\n",
+		       gpio_mode, pin_cfg);
+	bnx2x_set_cfg_pin(bp, pin_cfg, gpio_mode);
+}
+
+static void bnx2x_set_sfp_module_fault_led(struct link_params *params,
+					   u8 gpio_mode)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module fault LED to %d\n", gpio_mode);
+	if (CHIP_IS_E3(bp)) {
+		/*
+		 * Low ==> if SFP+ module is supported otherwise
+		 * High ==> if SFP+ module is not on the approved vendor list
+		 */
+		bnx2x_set_e3_module_fault_led(params, gpio_mode);
+	} else
+		bnx2x_set_e1e2_module_fault_led(params, gpio_mode);
+}
+
+static void bnx2x_warpcore_power_module(struct link_params *params,
+					struct bnx2x_phy *phy,
+					u8 power)
+{
+	u32 pin_cfg;
+	struct bnx2x *bp = params->bp;
+
+	pin_cfg = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].e3_sfp_ctrl)) &
+			PORT_HW_CFG_E3_PWR_DIS_MASK) >>
+			PORT_HW_CFG_E3_PWR_DIS_SHIFT;
+
+	if (pin_cfg == PIN_CFG_NA)
+		return;
+	DP(NETIF_MSG_LINK, "Setting SFP+ module power to %d using pin cfg %d\n",
+		       power, pin_cfg);
+	/*
+	 * Low ==> corresponding SFP+ module is powered
+	 * high ==> the SFP+ module is powered down
+	 */
+	bnx2x_set_cfg_pin(bp, pin_cfg, power ^ 1);
+}
+
+static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
+				    struct link_params *params)
+{
+	bnx2x_warpcore_power_module(params, phy, 0);
+}
+
+static void bnx2x_power_sfp_module(struct link_params *params,
+				   struct bnx2x_phy *phy,
+				   u8 power)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting SFP+ power to %x\n", power);
+
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		bnx2x_8727_power_module(params->bp, phy, power);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		bnx2x_warpcore_power_module(params, phy, power);
+		break;
+	default:
+		break;
+	}
+}
+
+static void bnx2x_warpcore_set_limiting_mode(struct link_params *params,
+					     struct bnx2x_phy *phy,
+					     u16 edc_mode)
+{
+	u16 val = 0;
+	u16 mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+	struct bnx2x *bp = params->bp;
+
+	u8 lane = bnx2x_get_warpcore_lane(phy, params);
+	/* This is a global register which controls all lanes */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+	val &= ~(0xf << (lane << 2));
+
+	switch (edc_mode) {
+	case EDC_MODE_LINEAR:
+	case EDC_MODE_LIMITING:
+		mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT;
+		break;
+	case EDC_MODE_PASSIVE_DAC:
+		mode = MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC;
+		break;
+	default:
+		break;
+	}
+
+	val |= (mode << (lane << 2));
+	bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+			 MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, val);
+	/* A must read */
+	bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+			MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE, &val);
+
+	/* Restart microcode to re-read the new mode */
+	bnx2x_warpcore_reset_lane(bp, phy, 1);
+	bnx2x_warpcore_reset_lane(bp, phy, 0);
+}
+
+static void bnx2x_set_limiting_mode(struct link_params *params,
+				    struct bnx2x_phy *phy,
+				    u16 edc_mode)
+{
+	switch (phy->type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		bnx2x_8726_set_limiting_mode(params->bp, phy, edc_mode);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+		bnx2x_8727_set_limiting_mode(params->bp, phy, edc_mode);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
+		bnx2x_warpcore_set_limiting_mode(params, phy, edc_mode);
+		break;
+	}
+}
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+			       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 edc_mode;
+	int rc = 0;
+
+	u32 val = REG_RD(bp, params->shmem_base +
+			     offsetof(struct shmem_region, dev_info.
+				     port_feature_config[params->port].config));
+
+	DP(NETIF_MSG_LINK, "SFP+ module plugged in/out detected on port %d\n",
+		 params->port);
+	/* Power up module */
+	bnx2x_power_sfp_module(params, phy, 1);
+	if (bnx2x_get_edc_mode(phy, params, &edc_mode) != 0) {
+		DP(NETIF_MSG_LINK, "Failed to get valid module type\n");
+		return -EINVAL;
+	} else if (bnx2x_verify_sfp_module(phy, params) != 0) {
+		/* check SFP+ module compatibility */
+		DP(NETIF_MSG_LINK, "Module verification failed!!\n");
+		rc = -EINVAL;
+		/* Turn on fault module-detected led */
+		bnx2x_set_sfp_module_fault_led(params,
+					       MISC_REGISTERS_GPIO_HIGH);
+
+		/* Check if need to power down the SFP+ module */
+		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_POWER_DOWN) {
+			DP(NETIF_MSG_LINK, "Shutdown SFP+ module!!\n");
+			bnx2x_power_sfp_module(params, phy, 0);
+			return rc;
+		}
+	} else {
+		/* Turn off fault module-detected led */
+		bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_LOW);
+	}
+
+	/*
+	 * Check and set limiting mode / LRM mode on 8726. On 8727 it
+	 * is done automatically
+	 */
+	bnx2x_set_limiting_mode(params, phy, edc_mode);
+
+	/*
+	 * Enable transmit for this module if the module is approved, or
+	 * if unapproved modules should also enable the Tx laser
+	 */
+	if (rc == 0 ||
+	    (val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) !=
+	    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+		bnx2x_sfp_set_transmitter(params, phy, 1);
+	else
+		bnx2x_sfp_set_transmitter(params, phy, 0);
+
+	return rc;
+}
+
+void bnx2x_handle_module_detect_int(struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy;
+	u32 gpio_val;
+	u8 gpio_num, gpio_port;
+	if (CHIP_IS_E3(bp))
+		phy = &params->phy[INT_PHY];
+	else
+		phy = &params->phy[EXT_PHY1];
+
+	if (bnx2x_get_mod_abs_int_cfg(bp, params->chip_id, params->shmem_base,
+				      params->port, &gpio_num, &gpio_port) ==
+	    -EINVAL) {
+		DP(NETIF_MSG_LINK, "Failed to get MOD_ABS interrupt config\n");
+		return;
+	}
+
+	/* Set valid module led off */
+	bnx2x_set_sfp_module_fault_led(params, MISC_REGISTERS_GPIO_HIGH);
+
+	/* Get current gpio val reflecting module plugged in / out*/
+	gpio_val = bnx2x_get_gpio(bp, gpio_num, gpio_port);
+
+	/* Call the handling function in case module is detected */
+	if (gpio_val == 0) {
+		bnx2x_power_sfp_module(params, phy, 1);
+		bnx2x_set_gpio_int(bp, gpio_num,
+				   MISC_REGISTERS_GPIO_INT_OUTPUT_CLR,
+				   gpio_port);
+		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+			bnx2x_sfp_module_detection(phy, params);
+		else
+			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+	} else {
+		u32 val = REG_RD(bp, params->shmem_base +
+				 offsetof(struct shmem_region, dev_info.
+					  port_feature_config[params->port].
+					  config));
+		bnx2x_set_gpio_int(bp, gpio_num,
+				   MISC_REGISTERS_GPIO_INT_OUTPUT_SET,
+				   gpio_port);
+		/*
+		 * Module was plugged out.
+		 * Disable transmit for this module
+		 */
+		phy->media_type = ETH_PHY_NOT_PRESENT;
+		if (((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+		     PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER) ||
+		    CHIP_IS_E3(bp))
+			bnx2x_sfp_set_transmitter(params, phy, 0);
+	}
+}
+
+/******************************************************************/
+/*		Used by 8706 and 8727                             */
+/******************************************************************/
+static void bnx2x_sfp_mask_fault(struct bnx2x *bp,
+				 struct bnx2x_phy *phy,
+				 u16 alarm_status_offset,
+				 u16 alarm_ctrl_offset)
+{
+	u16 alarm_status, val;
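+	/* Read the latched alarm status twice to clear any stale value */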
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, alarm_status_offset,
+			&alarm_status);
+	/* Mask or enable the fault event. */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, &val);
+	if (alarm_status & (1<<0))
+		val &= ~(1<<0);
+	else
+		val |= (1<<0);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, alarm_ctrl_offset, val);
+}
+
+/******************************************************************/
+/*		common BCM8706/BCM8726 PHY SECTION		  */
+/******************************************************************/
+static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy,
+				      struct link_params *params,
+				      struct link_vars *vars)
+{
+	u8 link_up = 0;
+	u16 val1, val2, rx_sd, pcs_status;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "XGXS 8706/8726\n");
+	/* Clear RX Alarm*/
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &val2);
+
+	bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+			     MDIO_PMA_LASI_TXCTRL);
+
+	/* clear LASI indication*/
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+	DP(NETIF_MSG_LINK, "8706/8726 LASI status 0x%x--> 0x%x\n", val1, val2);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_RX_SD, &rx_sd);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &pcs_status);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_LINK_STATUS, &val2);
+
+	DP(NETIF_MSG_LINK, "8706/8726 rx_sd 0x%x pcs_status 0x%x 1Gbps"
+			" link_status 0x%x\n", rx_sd, pcs_status, val2);
+	/*
+	 * link is up if both bit 0 of pmd_rx_sd and bit 0 of pcs_status
+	 * are set, or if the autoneg bit 1 is set
+	 */
+	link_up = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
+	if (link_up) {
+		if (val2 & (1<<1))
+			vars->line_speed = SPEED_1000;
+		else
+			vars->line_speed = SPEED_10000;
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+	}
+
+	/* Capture 10G link fault. Read twice to clear stale value. */
+	if (vars->line_speed == SPEED_10000) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+		if (val1 & (1<<0))
+			vars->fault_detected = 1;
+	}
+
+	return link_up;
+}
+
+/******************************************************************/
+/*			BCM8706 PHY SECTION			  */
+/******************************************************************/
+static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	u32 tx_en_mode;
+	u16 cnt, val, tmp1;
+	struct bnx2x *bp = params->bp;
+
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0xa040);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait until fw is loaded */
+	for (cnt = 0; cnt < 100; cnt++) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val);
+		if (val)
+			break;
+		msleep(10);
+	}
+	DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt);
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		u8 i;
+		u16 reg;
+		for (i = 0; i < 4; i++) {
+			reg = MDIO_XS_8706_REG_BANK_RX0 +
+				i*(MDIO_XS_8706_REG_BANK_RX1 -
+				   MDIO_XS_8706_REG_BANK_RX0);
+			bnx2x_cl45_read(bp, phy, MDIO_XS_DEVAD, reg, &val);
+			/* Clear first 3 bits of the control */
+			val &= ~0x7;
+			/* Set control bits according to configuration */
+			val |= (phy->rx_preemphasis[i] & 0x7);
+			DP(NETIF_MSG_LINK, "Setting RX Equalizer to BCM8706"
+				   " reg 0x%x <-- val 0x%x\n", reg, val);
+			bnx2x_cl45_write(bp, phy, MDIO_XS_DEVAD, reg, val);
+		}
+	}
+	/* Force speed */
+	if (phy->req_line_speed == SPEED_10000) {
+		DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_DIGITAL_CTRL, 0x400);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+				 0);
+		/* Arm LASI for link and Tx fault. */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 3);
+	} else {
+		/* Force 1Gbps using autoneg with 1G advertisement */
+
+		/* Allow CL37 through CL73 */
+		DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+
+		/* Enable Full-Duplex advertisement on CL37 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LP, 0x0020);
+		/* Enable CL37 AN */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+		/* 1G support */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, (1<<5));
+
+		/* Enable clause 73 AN */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 0x0400);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+				 0x0004);
+	}
+	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+
+	/*
+	 * If the TX Laser is controlled by GPIO_0, do not let the PHY go
+	 * into low power mode while the TX Laser is disabled
+	 */
+
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, &tmp1);
+		tmp1 |= 0x1;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_DIGITAL_CTRL, tmp1);
+	}
+
+	return 0;
+}
+
+static int bnx2x_8706_read_status(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	return bnx2x_8706_8726_read_status(phy, params, vars);
+}
+
+/******************************************************************/
+/*			BCM8726 PHY SECTION			  */
+/******************************************************************/
+static void bnx2x_8726_config_loopback(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "PMA/PMD ext_phy_loopback: 8726\n");
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0001);
+}
+
+static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy,
+					 struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	/* Need to wait 100ms after reset */
+	msleep(100);
+
+	/* Micro controller re-boot */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x018B);
+
+	/* Set soft reset */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL,
+			 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP);
+
+	/* wait for 150ms for microcode load */
+	msleep(150);
+
+	/* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_MISC_CTRL1, 0x0000);
+
+	msleep(200);
+	bnx2x_save_bcm_spirom_ver(bp, phy, params->port);
+}
+
+static u8 bnx2x_8726_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val1;
+	u8 link_up = bnx2x_8706_8726_read_status(phy, params, vars);
+	if (link_up) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER,
+				&val1);
+		if (val1 & (1<<15)) {
+			DP(NETIF_MSG_LINK, "Tx is disabled\n");
+			link_up = 0;
+			vars->line_speed = 0;
+		}
+	}
+	return link_up;
+}
+
+static int bnx2x_8726_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Initializing BCM8726\n");
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_8726_external_rom_boot(phy, params);
+
+	/*
+	 * Need to call module detected on initialization since the module
+	 * detection triggered by actual module insertion might occur before
+	 * driver is loaded, and when driver is loaded, it reset all
+	 * registers, including the transmitter
+	 */
+	bnx2x_sfp_module_detection(phy, params);
+
+	if (phy->req_line_speed == SPEED_1000) {
+		DP(NETIF_MSG_LINK, "Setting 1G force\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x5);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 0x400);
+	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		   (phy->speed_cap_mask &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G) &&
+		   ((phy->speed_cap_mask &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+		    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+		/* Set Flow control */
+		bnx2x_ext_phy_set_pause(params, phy, vars);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_ADV, 0x20);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_CL73, 0x040c);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_FC_LD, 0x0020);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1000);
+		bnx2x_cl45_write(bp, phy,
+				MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x1200);
+		/*
+		 * Enable RX-ALARM control to receive interrupt for 1G speed
+		 * change
+		 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x4);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 0x400);
+
+	} else { /* Default 10G. Set only LASI control */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 1);
+	}
+
+	/* Set TX PreEmphasis if needed */
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x,"
+			 "TX_CTRL2 0x%x\n",
+			 phy->tx_preemphasis[0],
+			 phy->tx_preemphasis[1]);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8726_TX_CTRL1,
+				 phy->tx_preemphasis[0]);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8726_TX_CTRL2,
+				 phy->tx_preemphasis[1]);
+	}
+
+	return 0;
+}
+
+static void bnx2x_8726_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "bnx2x_8726_link_reset port %d\n", params->port);
+	/* Set serial boot control for external load */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_GEN_CTRL, 0x0001);
+}
+
+/******************************************************************/
+/*			BCM8727 PHY SECTION			  */
+/******************************************************************/
+
+static void bnx2x_8727_set_link_led(struct bnx2x_phy *phy,
+				    struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 led_mode_bitmask = 0;
+	u16 gpio_pins_bitmask = 0;
+	u16 val;
+	/* Only NOC flavor requires to set the LED specifically */
+	if (!(phy->flags & FLAGS_NOC))
+		return;
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		led_mode_bitmask = 0;
+		gpio_pins_bitmask = 0x03;
+		break;
+	case LED_MODE_ON:
+		led_mode_bitmask = 0;
+		gpio_pins_bitmask = 0x02;
+		break;
+	case LED_MODE_OPER:
+		led_mode_bitmask = 0x60;
+		gpio_pins_bitmask = 0x11;
+		break;
+	}
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			&val);
+	val &= 0xff8f;
+	val |= led_mode_bitmask;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			 val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8727_GPIO_CTRL,
+			&val);
+	val &= 0xffe0;
+	val |= gpio_pins_bitmask;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8727_GPIO_CTRL,
+			 val);
+}
+
+static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	u32 swap_val, swap_override;
+	u8 port;
+	/*
+	 * The PHY reset is controlled by GPIO 1. Fake the port number
+	 * to cancel the swap done in set_gpio()
+	 */
+	struct bnx2x *bp = params->bp;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	port = (swap_val && swap_override) ^ 1;
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+}
+
+static int bnx2x_8727_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	u32 tx_en_mode;
+	u16 tmp1, val, mod_abs, tmp2;
+	u16 rx_alarm_ctrl_val;
+	u16 lasi_ctrl_val;
+	struct bnx2x *bp = params->bp;
+	/* Enable PMD link, MOD_ABS_FLT, and 1G link alarm */
+
+	bnx2x_wait_reset_complete(bp, phy, params);
+	rx_alarm_ctrl_val = (1<<2) | (1<<5) ;
+	/* Should be 0x6 to enable XS on Tx side. */
+	lasi_ctrl_val = 0x0006;
+
+	DP(NETIF_MSG_LINK, "Initializing BCM8727\n");
+	/* enable LASI */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+			 rx_alarm_ctrl_val);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_TXCTRL,
+			 0);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, lasi_ctrl_val);
+
+	/*
+	 * Initially configure MOD_ABS to interrupt when the module is
+	 * present (bit 8)
+	 */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+	/*
+	 * Set EDC off by setting OPTXLOS signal input to low (bit 9).
+	 * When the EDC is off it locks onto a reference clock and avoids
+	 * becoming 'lost'
+	 */
+	mod_abs &= ~(1<<8);
+	if (!(phy->flags & FLAGS_NOC))
+		mod_abs &= ~(1<<9);
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+	/* Enable/Disable PHY transmitter output */
+	bnx2x_set_disable_pmd_transmit(params, phy, 0);
+
+	/* Make MOD_ABS give interrupt on change */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL,
+			&val);
+	val |= (1<<12);
+	if (phy->flags & FLAGS_NOC)
+		val |= (3<<5);
+
+	/*
+	 * Set the 8727 GPIOs to input to allow reading the 8727 GPIO0
+	 * status, which reflects SFP+ module over-current
+	 */
+	if (!(phy->flags & FLAGS_NOC))
+		val &= 0xff8f; /* Reset bits 4-6 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_PCS_OPT_CTRL, val);
+
+	bnx2x_8727_power_module(bp, phy, 1);
+
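+	/* Reading these latched status registers clears any pending alarms */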
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &tmp1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1);
+
+	/* Set option 1G speed */
+	if (phy->req_line_speed == SPEED_1000) {
+		DP(NETIF_MSG_LINK, "Setting 1G force\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD);
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1);
+		DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
+		/*
+		 * Power down the XAUI until link is up in case of dual-media
+		 * and 1G
+		 */
+		if (DUAL_MEDIA(params)) {
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8727_PCS_GP, &val);
+			val |= (3<<10);
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8727_PCS_GP, val);
+		}
+	} else if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
+		   ((phy->speed_cap_mask &
+		     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) &&
+		   ((phy->speed_cap_mask &
+		      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) !=
+		   PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
+
+		DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300);
+	} else {
+		/*
+		 * Since the 8727 has only single reset pin, need to set the 10G
+		 * registers although it is default
+		 */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL,
+				 0x0020);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2,
+				 0x0008);
+	}
+
+	/*
+	 * Set the 2-wire transfer rate of the SFP+ module EEPROM
+	 * to 100kHz, since some DACs (direct attach cables) do
+	 * not work at 400kHz.
+	 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR,
+			 0xa001);
+
+	/* Set TX PreEmphasis if needed */
+	if ((params->feature_config_flags &
+	     FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) {
+		DP(NETIF_MSG_LINK, "Setting TX_CTRL1 0x%x, TX_CTRL2 0x%x\n",
+			   phy->tx_preemphasis[0],
+			   phy->tx_preemphasis[1]);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL1,
+				 phy->tx_preemphasis[0]);
+
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_TX_CTRL2,
+				 phy->tx_preemphasis[1]);
+	}
+
+	/*
+	 * If TX Laser is controlled by GPIO_0, do not let PHY go into low
+	 * power mode, if TX Laser is disabled
+	 */
+	tx_en_mode = REG_RD(bp, params->shmem_base +
+			    offsetof(struct shmem_region,
+				dev_info.port_hw_config[params->port].sfp_ctrl))
+			& PORT_HW_CFG_TX_LASER_MASK;
+
+	if (tx_en_mode == PORT_HW_CFG_TX_LASER_GPIO0) {
+
+		DP(NETIF_MSG_LINK, "Enabling TXONOFF_PWRDN_DIS\n");
+		bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, &tmp2);
+		tmp2 |= 0x1000;
+		tmp2 &= 0xFFEF;
+		bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_OPT_CFG_REG, tmp2);
+	}
+
+	return 0;
+}
+
+static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy,
+				      struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 mod_abs, rx_alarm_status;
+	u32 val = REG_RD(bp, params->shmem_base +
+			     offsetof(struct shmem_region, dev_info.
+				      port_feature_config[params->port].
+				      config));
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_PHY_IDENTIFIER, &mod_abs);
+	if (mod_abs & (1<<8)) {
+
+		/* Module is absent */
+		DP(NETIF_MSG_LINK, "MOD_ABS indication "
+			    "show module is absent\n");
+		phy->media_type = ETH_PHY_NOT_PRESENT;
+		/*
+		 * 1. Set mod_abs to detect the next module
+		 *    presence event
+		 * 2. Set EDC off by setting the OPTXLOS signal input to low
+		 *    (bit 9).
+		 *    When the EDC is off, it locks onto a reference clock
+		 *    and avoids becoming 'lost'.
+		 */
+		mod_abs &= ~(1<<8);
+		if (!(phy->flags & FLAGS_NOC))
+			mod_abs &= ~(1<<9);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+		/*
+		 * Clear the RX alarm since it stays up as long as
+		 * mod_abs hasn't changed
+		 */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+	} else {
+		/* Module is present */
+		DP(NETIF_MSG_LINK, "MOD_ABS indication "
+			    "shows module is present\n");
+		/*
+		 * First disable the transmitter; if the module is OK,
+		 * module detection will re-enable it.
+		 * 1. Set mod_abs to detect the next module-absent event (bit 8)
+		 * 2. Restore the default polarity of the OPRXLOS signal, so
+		 *    that it correctly indicates the presence or absence of
+		 *    the Rx signal (bit 9)
+		 */
+		mod_abs |= (1<<8);
+		if (!(phy->flags & FLAGS_NOC))
+			mod_abs |= (1<<9);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_PHY_IDENTIFIER, mod_abs);
+
+		/*
+		 * Clear the RX alarm since it stays up as long as mod_abs
+		 * hasn't changed. This needs to be done before calling
+		 * module detection; otherwise it will clear the link
+		 * update alarm
+		 */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+
+		if ((val & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_MASK) ==
+		    PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_DISABLE_TX_LASER)
+			bnx2x_sfp_set_transmitter(params, phy, 0);
+
+		if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0)
+			bnx2x_sfp_module_detection(phy, params);
+		else
+			DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n");
+	}
+
+	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n",
+		   rx_alarm_status);
+	/* No need to check link status in case of module plugged in/out */
+}
+
+static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up = 0, oc_port = params->port;
+	u16 link_status = 0;
+	u16 rx_alarm_status, lasi_ctrl, val1;
+
+	/* If PHY is not initialized, do not check link status */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL,
+			&lasi_ctrl);
+	if (!lasi_ctrl)
+		return 0;
+
+	/* Check the LASI on Rx */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT,
+			&rx_alarm_status);
+	vars->line_speed = 0;
+	DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS  0x%x\n", rx_alarm_status);
+
+	bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT,
+			     MDIO_PMA_LASI_TXCTRL);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+
+	DP(NETIF_MSG_LINK, "8727 LASI status 0x%x\n", val1);
+
+	/* Clear MSG-OUT */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &val1);
+
+	/*
+	 * On non-NOC flavors, if a module is present, check it
+	 * for over-current
+	 */
+	if (!(phy->flags & FLAGS_NOC) && !(rx_alarm_status & (1<<5))) {
+		/* Check over-current using the 8727 GPIO0 input */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD, MDIO_PMA_REG_8727_GPIO_CTRL,
+				&val1);
+
+		if ((val1 & (1<<8)) == 0) {
+			if (!CHIP_IS_E1x(bp))
+				oc_port = BP_PATH(bp) + (params->port << 1);
+			DP(NETIF_MSG_LINK, "8727 Power fault has been detected"
+				       " on port %d\n", oc_port);
+			netdev_err(bp->dev, "Error:  Power fault on Port %d has"
+					    " been detected and the power to "
+					    "that SFP+ module has been removed"
+					    " to prevent failure of the card."
+					    " Please remove the SFP+ module and"
+					    " restart the system to clear this"
+					    " error.\n",
+			 oc_port);
+			/* Disable all RX_ALARMs except for mod_abs */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_LASI_RXCTRL, (1<<5));
+
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+			/* Wait for module_absent_event */
+			val1 |= (1<<8);
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_PHY_IDENTIFIER, val1);
+			/* Clear RX alarm */
+			bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_LASI_RXSTAT, &rx_alarm_status);
+			return 0;
+		}
+	} /* Over current check */
+
+	/* When module absent bit is set, check module */
+	if (rx_alarm_status & (1<<5)) {
+		bnx2x_8727_handle_mod_abs(phy, params);
+		/* Enable all mod_abs and link detection bits */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL,
+				 ((1<<5) | (1<<2)));
+	}
+	DP(NETIF_MSG_LINK, "Enabling 8727 TX laser if SFP is approved\n");
+	bnx2x_8727_specific_func(phy, params, ENABLE_TX);
+	/* If transmitter is disabled, ignore false link up indication */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_PHY_IDENTIFIER, &val1);
+	if (val1 & (1<<15)) {
+		DP(NETIF_MSG_LINK, "Tx is disabled\n");
+		return 0;
+	}
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8073_SPEED_LINK_STATUS, &link_status);
+
+	/*
+	 * Bits 0..2 --> speed detected,
+	 * Bits 13..15 --> link is down
+	 */
+	if ((link_status & (1<<2)) && (!(link_status & (1<<15)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_10000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 10G\n",
+			   params->port);
+	} else if ((link_status & (1<<0)) && (!(link_status & (1<<13)))) {
+		link_up = 1;
+		vars->line_speed = SPEED_1000;
+		DP(NETIF_MSG_LINK, "port %x: External link up in 1G\n",
+			   params->port);
+	} else {
+		link_up = 0;
+		DP(NETIF_MSG_LINK, "port %x: External link is down\n",
+			   params->port);
+	}
+
+	/* Capture 10G link fault. */
+	if (vars->line_speed == SPEED_10000) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD,
+			    MDIO_PMA_LASI_TXSTAT, &val1);
+
+		if (val1 & (1<<0)) {
+			vars->fault_detected = 1;
+		}
+	}
+
+	if (link_up) {
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "duplex = 0x%x\n", vars->duplex);
+	}
+
+	if ((DUAL_MEDIA(params)) &&
+	    (phy->req_line_speed == SPEED_1000)) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_8727_PCS_GP, &val1);
+		/*
+		 * In case of dual-media board and 1G, power up the XAUI side,
+		 * otherwise power it down. For 10G it is done automatically
+		 */
+		if (link_up)
+			val1 &= ~(3<<10);
+		else
+			val1 |= (3<<10);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_8727_PCS_GP, val1);
+	}
+	return link_up;
+}
+
+static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
+				  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+
+	/* Enable/Disable PHY transmitter output */
+	bnx2x_set_disable_pmd_transmit(params, phy, 1);
+
+	/* Disable Transmitter */
+	bnx2x_sfp_set_transmitter(params, phy, 0);
+	/* Clear LASI */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0);
+
+}
+
+/******************************************************************/
+/*		BCM8481/BCM84823/BCM84833 PHY SECTION	          */
+/******************************************************************/
+static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
+					   struct link_params *params)
+{
+	u16 val, fw_ver1, fw_ver2, cnt;
+	u8 port;
+	struct bnx2x *bp = params->bp;
+
+	port = params->port;
+
+	/* For the 32-bit registers in the 848xx, access via the MDIO2ARM
+	 * interface.
+	 * (1) Set register 0xc200_0014 (SPI_BRIDGE_CTRL_2) to 0x03000000
+	 */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
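+	/* Poll the MDIO2ARM status register until the command completes */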
+	for (cnt = 0; cnt < 100; cnt++) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		if (val & 1)
+			break;
+		udelay(5);
+	}
+	if (cnt == 100) {
+		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
+		bnx2x_save_spirom_version(bp, port, 0,
+					  phy->ver_addr);
+		return;
+	}
+
+	/* (2) Read register 0xc200_0000 (SPI_FW_STATUS) */
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+	for (cnt = 0; cnt < 100; cnt++) {
+		bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+		if (val & 1)
+			break;
+		udelay(5);
+	}
+	if (cnt == 100) {
+		DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
+		bnx2x_save_spirom_version(bp, port, 0,
+					  phy->ver_addr);
+		return;
+	}
+
+	/* lower 16 bits of the register SPI_FW_STATUS */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+	/* upper 16 bits of register SPI_FW_STATUS */
+	bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
+
+	bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
+				  phy->ver_addr);
+}
+
+static void bnx2x_848xx_set_led(struct bnx2x *bp,
+				struct bnx2x_phy *phy)
+{
+	u16 val;
+
+	/* PHYC_CTL_LED_CTL */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LINK_SIGNAL, &val);
+	val &= 0xFE00;
+	val |= 0x0092;
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LINK_SIGNAL, val);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED1_MASK,
+			 0x80);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED2_MASK,
+			 0x18);
+
+	/* Select activity source by Tx and Rx, as suggested by PHY AE */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_8481_LED3_MASK,
+			 0x0006);
+
+	/* Select the closest activity blink rate to that in 10/100/1000 */
+	bnx2x_cl45_write(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_8481_LED3_BLINK,
+			0);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
+	val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+
+	/* 'Interrupt Mask' */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD,
+			 0xFFFB, 0xFFFD);
+}
+
+static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
+				       struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 autoneg_val, an_1000_val, an_10_100_val;
+	u16 tmp_req_line_speed;
+
+	tmp_req_line_speed = phy->req_line_speed;
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		if (phy->req_line_speed == SPEED_10000)
+			phy->req_line_speed = SPEED_AUTO_NEG;
+
+	/*
+	 * This phy uses the NIG latch mechanism since link indication
+	 * arrives through its LED4 and not via its LASI signal, so we
+	 * get a steady signal instead of a clear-on-read one
+	 */
+	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
+		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x0000);
+
+	bnx2x_848xx_set_led(bp, phy);
+
+	/* set 1000 speed advertisement */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+			&an_1000_val);
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD,
+			MDIO_AN_REG_8481_LEGACY_AN_ADV,
+			&an_10_100_val);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_MII_CTRL,
+			&autoneg_val);
+	/* Disable forced speed */
+	autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+	an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8));
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	     PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+	    (phy->req_line_speed == SPEED_1000)) {
+		an_1000_val |= (1<<8);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_1000_val |= (1<<9);
+		DP(NETIF_MSG_LINK, "Advertising 1G\n");
+	} else
+		an_1000_val &= ~((1<<8) | (1<<9));
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_8481_1000T_CTRL,
+			 an_1000_val);
+
+	/* set 100 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	      (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
+	     (phy->supported &
+	      (SUPPORTED_100baseT_Half |
+	       SUPPORTED_100baseT_Full)))) {
+		an_10_100_val |= (1<<7);
+		/* Enable autoneg and restart autoneg for legacy speeds */
+		autoneg_val |= (1<<9 | 1<<12);
+
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<8);
+		DP(NETIF_MSG_LINK, "Advertising 100M\n");
+	}
+	/* set 10 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	     (phy->speed_cap_mask &
+	      (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+	       PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)) &&
+	     (phy->supported &
+	      (SUPPORTED_10baseT_Half |
+	       SUPPORTED_10baseT_Full)))) {
+		an_10_100_val |= (1<<5);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<6);
+		DP(NETIF_MSG_LINK, "Advertising 10M\n");
+	}
+
+	/* Only 10/100 are allowed to work in FORCE mode */
+	if ((phy->req_line_speed == SPEED_100) &&
+	    (phy->supported &
+	     (SUPPORTED_100baseT_Half |
+	      SUPPORTED_100baseT_Full))) {
+		autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+				 (1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 100M force\n");
+	}
+	if ((phy->req_line_speed == SPEED_10) &&
+	    (phy->supported &
+	     (SUPPORTED_10baseT_Half |
+	      SUPPORTED_10baseT_Full))) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
+				 (1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 10M force\n");
+	}
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_8481_LEGACY_AN_ADV,
+			 an_10_100_val);
+
+	if (phy->req_duplex == DUPLEX_FULL)
+		autoneg_val |= (1<<8);
+
+	/*
+	 * Always write this if this is not 84833.
+	 * For 84833, write it only when it's a forced speed.
+	 */
+	if ((phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) ||
+		((autoneg_val & (1<<12)) == 0))
+		bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD,
+			 MDIO_AN_REG_8481_LEGACY_MII_CTRL, autoneg_val);
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+	    (phy->speed_cap_mask &
+	     PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) ||
+		(phy->req_line_speed == SPEED_10000)) {
+			DP(NETIF_MSG_LINK, "Advertising 10G\n");
+			/* Restart autoneg for 10G*/
+
+			bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+				 0x3200);
+	} else
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+				 1);
+
+	/* Save spirom version */
+	bnx2x_save_848xx_spirom_version(phy, params);
+
+	phy->req_line_speed = tmp_req_line_speed;
+
+	return 0;
+}
+
+static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	return bnx2x_848xx_cmn_config_init(phy, params, vars);
+}
+
+#define PHY84833_HDSHK_WAIT 300
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	u32 idx;
+	u32 pair_swap;
+	u16 val;
+	u16 data;
+	struct bnx2x *bp = params->bp;
+	/* Do pair swap */
+
+	/* Check for configuration. */
+	pair_swap = REG_RD(bp, params->shmem_base +
+			   offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+		PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+	if (pair_swap == 0)
+		return 0;
+
+	data = (u16)pair_swap;
+
+	/* Write CMD_OPEN_OVERRIDE to STATUS reg */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG2,
+			PHY84833_CMD_OPEN_OVERRIDE);
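+	/* Wait for the firmware to acknowledge it is open for commands */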
+	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+		if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+			break;
+		msleep(1);
+	}
+	if (idx >= PHY84833_HDSHK_WAIT) {
+		DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+		return -EINVAL;
+	}
+
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG4,
+			data);
+	/* Issue pair swap command */
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG0,
+			PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
+	for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+		if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+			(val == PHY84833_CMD_COMPLETE_ERROR))
+			break;
+		msleep(1);
+	}
+	if ((idx >= PHY84833_HDSHK_WAIT) ||
+		(val == PHY84833_CMD_COMPLETE_ERROR)) {
+		DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+		return -EINVAL;
+	}
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_84833_TOP_CFG_SCRATCH_REG2,
+			PHY84833_CMD_CLEAR_COMPLETE);
+	DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+	return 0;
+}
+
+static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 chip_id)
+{
+	u32 reset_pin[2];
+	u32 idx;
+	u8 reset_gpios;
+	if (CHIP_IS_E3(bp)) {
+		/* Assume that these will be GPIOs, not EPIOs. */
+		for (idx = 0; idx < 2; idx++) {
+			/* Map config param to register bit. */
+			reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[0].e3_cmn_pin_cfg));
+			reset_pin[idx] = (reset_pin[idx] &
+				PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+				PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+			reset_pin[idx] -= PIN_CFG_GPIO0_P0;
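+			/* Convert the pin number into a one-hot bit mask */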
+			reset_pin[idx] = (1 << reset_pin[idx]);
+		}
+		reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+	} else {
+		/* E2: read from a different place in shmem. */
+		for (idx = 0; idx < 2; idx++) {
+			reset_pin[idx] = REG_RD(bp, shmem_base_path[idx] +
+				offsetof(struct shmem_region,
+				dev_info.port_hw_config[0].default_cfg));
+			reset_pin[idx] &= PORT_HW_CFG_EXT_PHY_GPIO_RST_MASK;
+			reset_pin[idx] -= PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0;
+			reset_pin[idx] >>= PORT_HW_CFG_EXT_PHY_GPIO_RST_SHIFT;
+			reset_pin[idx] = (1 << reset_pin[idx]);
+		}
+		reset_gpios = (u8)(reset_pin[0] | reset_pin[1]);
+	}
+
+	return reset_gpios;
+}
+
+static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 reset_gpios;
+	u32 other_shmem_base_addr = REG_RD(bp, params->shmem2_base +
+				offsetof(struct shmem2_region,
+				other_shmem_base_addr));
+
+	u32 shmem_base_path[2];
+	shmem_base_path[0] = params->shmem_base;
+	shmem_base_path[1] = other_shmem_base_addr;
+
+	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path,
+						  params->chip_id);
+
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+	udelay(10);
+	DP(NETIF_MSG_LINK, "84833 hw reset on pin values 0x%x\n",
+		reset_gpios);
+
+	return 0;
+}
+
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+						u32 shmem_base_path[],
+						u32 chip_id)
+{
+	u8 reset_gpios;
+
+	reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+	udelay(10);
+	bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+	msleep(800);
+	DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+		reset_gpios);
+
+	return 0;
+}
+
+#define PHY84833_CONSTANT_LATENCY 1193
+static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
+				   struct link_params *params,
+				   struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port, initialize = 1;
+	u16 val;
+	u16 temp;
+	u32 actual_phy_selection, cms_enable, idx;
+	int rc = 0;
+
+	msleep(1);
+
+	if (!(CHIP_IS_E1(bp)))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+			       port);
+	} else {
+		/* MDIO reset */
+		bnx2x_cl45_write(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, 0x8000);
+		/* Bring PHY out of super isolate mode */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+		val &= ~MDIO_84833_SUPER_ISOLATE;
+		bnx2x_cl45_write(bp, phy,
+				MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+	}
+
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for GPHY to come out of reset */
+	msleep(50);
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+		bnx2x_84833_pair_swap_cfg(phy, params, vars);
+
+	/*
+	 * BCM84823 requires the XGXS link to come up first at 10G for
+	 * normal behavior
+	 */
+	temp = vars->line_speed;
+	vars->line_speed = SPEED_10000;
+	bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+	bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+	vars->line_speed = temp;
+
+	/* Set the dual-media configuration according to the multi-PHY config */
+
+	bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+			MDIO_CTL_REG_84823_MEDIA, &val);
+	val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_LINE_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN |
+		 MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK |
+		 MDIO_CTL_REG_84823_MEDIA_FIBER_1G);
+
+	if (CHIP_IS_E3(bp)) {
+		val &= ~(MDIO_CTL_REG_84823_MEDIA_MAC_MASK |
+			 MDIO_CTL_REG_84823_MEDIA_LINE_MASK);
+	} else {
+		val |= (MDIO_CTL_REG_84823_CTRL_MAC_XFI |
+			MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L);
+	}
+
+	actual_phy_selection = bnx2x_phy_selection(params);
+
+	switch (actual_phy_selection) {
+	case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
+		/* Do nothing. Essentially this is like copper priority */
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER;
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		val |= MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER;
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		/* Do nothing here. The first PHY won't be initialized at all */
+		break;
+	case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		val |= MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN;
+		initialize = 0;
+		break;
+	}
+	if (params->phy[EXT_PHY2].req_line_speed == SPEED_1000)
+		val |= MDIO_CTL_REG_84823_MEDIA_FIBER_1G;
+
+	bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			 MDIO_CTL_REG_84823_MEDIA, val);
+	DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
+		   params->multi_phy_config, val);
+
+	/* AutogrEEEn */
+	if (params->feature_config_flags &
+		FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+		/* Ensure that f/w is ready */
+		for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+			bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+					MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+			if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+				break;
+			usleep_range(1000, 1000);
+		}
+		if (idx >= PHY84833_HDSHK_WAIT) {
+			DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
+			return -EINVAL;
+		}
+
+		/* Select EEE mode */
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_SCRATCH_REG3,
+				0x2);
+
+		/* Set Idle and Latency */
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_SCRATCH_REG4,
+				PHY84833_CONSTANT_LATENCY + 1);
+
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_DATA3_REG,
+				PHY84833_CONSTANT_LATENCY + 1);
+
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_DATA4_REG,
+				PHY84833_CONSTANT_LATENCY);
+
+		/* Send EEE instruction to command register */
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_84833_TOP_CFG_SCRATCH_REG0,
+				PHY84833_DIAG_CMD_SET_EEE_MODE);
+
+		/* Ensure that the command has completed */
+		for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+			bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+					MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
+			if ((val == PHY84833_CMD_COMPLETE_PASS) ||
+				(val == PHY84833_CMD_COMPLETE_ERROR))
+				break;
+			usleep_range(1000, 1000);
+		}
+		if ((idx >= PHY84833_HDSHK_WAIT) ||
+			(val == PHY84833_CMD_COMPLETE_ERROR)) {
+			DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
+			return -EINVAL;
+		}
+
+		/* Reset command handler */
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+			    MDIO_84833_TOP_CFG_SCRATCH_REG2,
+			    PHY84833_CMD_CLEAR_COMPLETE);
+	}
+
+	if (initialize)
+		rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
+	else
+		bnx2x_save_848xx_spirom_version(phy, params);
+	/* The 84833 PHY has a better mechanism and doesn't need CMS */
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		cms_enable = REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].default_cfg)) &
+			PORT_HW_CFG_ENABLE_CMS_MASK;
+
+		bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+				MDIO_CTL_REG_84823_USER_CTRL_REG, &val);
+		if (cms_enable)
+			val |= MDIO_CTL_REG_84823_USER_CTRL_CMS;
+		else
+			val &= ~MDIO_CTL_REG_84823_USER_CTRL_CMS;
+		bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+				 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
+	}
+
+	return rc;
+}
+
+static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val, val1, val2;
+	u8 link_up = 0;
+
+	/* Check 10G-BaseT link status */
+	/* Check PMD signal ok */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, 0xFFFA, &val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_PMD_SIGNAL,
+			&val2);
+	DP(NETIF_MSG_LINK, "BCM848xx: PMD_SIGNAL 1.a811 = 0x%x\n", val2);
+
+	/* Check link 10G */
+	if (val2 & (1<<11)) {
+		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
+		link_up = 1;
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+	} else { /* Check Legacy speed link */
+		u16 legacy_status, legacy_speed;
+
+		/* Enable expansion register 0x42 (Operation mode status) */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf42);
+
+		/* Get legacy speed operation status */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8481_EXPANSION_REG_RD_RW,
+				&legacy_status);
+
+		DP(NETIF_MSG_LINK, "Legacy speed status"
+			     " = 0x%x\n", legacy_status);
+		link_up = ((legacy_status & (1<<11)) == (1<<11));
+		if (link_up) {
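+			/* Bits 10:9 of the status encode the legacy speed */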
+			legacy_speed = (legacy_status & (3<<9));
+			if (legacy_speed == (0<<9))
+				vars->line_speed = SPEED_10;
+			else if (legacy_speed == (1<<9))
+				vars->line_speed = SPEED_100;
+			else if (legacy_speed == (2<<9))
+				vars->line_speed = SPEED_1000;
+			else /* Should not happen */
+				vars->line_speed = 0;
+
+			if (legacy_status & (1<<8))
+				vars->duplex = DUPLEX_FULL;
+			else
+				vars->duplex = DUPLEX_HALF;
+
+			DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
+				   " is_duplex_full= %d\n", vars->line_speed,
+				   (vars->duplex == DUPLEX_FULL));
+			/* Check legacy speed AN resolution */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_8481_LEGACY_MII_STATUS,
+					&val);
+			if (val & (1<<5))
+				vars->link_status |=
+					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+			bnx2x_cl45_read(bp, phy,
+					MDIO_AN_DEVAD,
+					MDIO_AN_REG_8481_LEGACY_AN_EXPANSION,
+					&val);
+			if ((val & (1<<0)) == 0)
+				vars->link_status |=
+					LINK_STATUS_PARALLEL_DETECTION_USED;
+		}
+	}
+	if (link_up) {
+		DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
+			   vars->line_speed);
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+
+	return link_up;
+}
+
+static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+	int status = 0;
+	u32 spirom_ver;
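+	/* Bits 7..11 of raw_ver hold the major number, bits 0..6 the minor */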
+	spirom_ver = ((raw_ver & 0xF80) >> 7) << 16 | (raw_ver & 0x7F);
+	status = bnx2x_format_ver(spirom_ver, str, len);
+	return status;
+}
+
+static void bnx2x_8481_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 1);
+}
+
+static void bnx2x_8481_link_reset(struct bnx2x_phy *phy,
+					struct link_params *params)
+{
+	bnx2x_cl45_write(params->bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0x0000);
+	bnx2x_cl45_write(params->bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1);
+}
+
+static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
+				   struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port;
+	u16 val16;
+
+	if (!(CHIP_IS_E1(bp)))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
+
+	if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW,
+			       port);
+	} else {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_CTL_DEVAD,
+				0x400f, &val16);
+		bnx2x_cl45_write(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_CTRL, 0x800);
+	}
+}
+
+static void bnx2x_848xx_set_link_led(struct bnx2x_phy *phy,
+				     struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u8 port;
+
+	if (!(CHIP_IS_E1(bp)))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
+
+	switch (mode) {
+	case LED_MODE_OFF:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OFF\n", port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED1_MASK,
+					0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED2_MASK,
+					0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED3_MASK,
+					0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LED5_MASK,
+					0x0);
+
+		} else {
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+		}
+		break;
+	case LED_MODE_FRONT_PANEL_OFF:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE FRONT PANEL OFF\n",
+		   port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x20);
+
+		} else {
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+		}
+		break;
+	case LED_MODE_ON:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE ON\n", port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+			/* Set control reg */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+			val &= 0x8000;
+			val |= 0x2492;
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x0);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x20);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x20);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x0);
+		} else {
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x20);
+		}
+		break;
+
+	case LED_MODE_OPER:
+
+		DP(NETIF_MSG_LINK, "Port 0x%x: LED MODE OPER\n", port);
+
+		if ((params->hw_led_mode << SHARED_HW_CFG_LED_MODE_SHIFT) ==
+		    SHARED_HW_CFG_LED_EXTPHY1) {
+
+			/* Set control reg */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+
+			if (!((val &
+			       MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK)
+			  >> MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT)) {
+				DP(NETIF_MSG_LINK, "Setting LINK_SIGNAL\n");
+				bnx2x_cl45_write(bp, phy,
+						 MDIO_PMA_DEVAD,
+						 MDIO_PMA_REG_8481_LINK_SIGNAL,
+						 0xa492);
+			}
+
+			/* Set LED masks */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x10);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED2_MASK,
+					 0x80);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED3_MASK,
+					 0x98);
+
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED5_MASK,
+					 0x40);
+
+		} else {
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LED1_MASK,
+					 0x80);
+
+			/* Tell LED3 to blink on source */
+			bnx2x_cl45_read(bp, phy,
+					MDIO_PMA_DEVAD,
+					MDIO_PMA_REG_8481_LINK_SIGNAL,
+					&val);
+			val &= ~(7<<6);
+			val |= (1<<6); /* A83B[8:6]= 1 */
+			bnx2x_cl45_write(bp, phy,
+					 MDIO_PMA_DEVAD,
+					 MDIO_PMA_REG_8481_LINK_SIGNAL,
+					 val);
+		}
+		break;
+	}
+
+	/*
+	 * This is a workaround for E3+84833 until autoneg
+	 * restart is fixed in f/w
+	 */
+	if (CHIP_IS_E3(bp)) {
+		bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+				MDIO_WC_REG_GP2_STATUS_GP_2_1, &val);
+	}
+}
+
+/******************************************************************/
+/*			54618SE PHY SECTION			  */
+/******************************************************************/
+static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
+					       struct link_params *params,
+					       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 port;
+	u16 autoneg_val, an_1000_val, an_10_100_val, fc_val, temp;
+	u32 cfg_pin;
+
+	DP(NETIF_MSG_LINK, "54618SE cfg init\n");
+	usleep_range(1000, 1000);
+
+	/*
+	 * This works with E3 only, no need to check the chip
+	 * before determining the port.
+	 */
+	port = params->port;
+
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+			PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+			PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+	/* Drive pin high to bring the GPHY out of reset. */
+	bnx2x_set_cfg_pin(bp, cfg_pin, 1);
+
+	/* Wait for the GPHY to come out of reset */
+	msleep(50);
+
+	/* Soft-reset the PHY */
+	bnx2x_cl22_write(bp, phy,
+			 MDIO_PMA_REG_CTRL, 0x8000);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	/* Wait for the GPHY reset to complete */
+	msleep(50);
+
+	/* Configure LED4: set to INTR (0x6). */
+	/* Accessing shadow register 0xe. */
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_LED_SEL2);
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			&temp);
+	temp &= ~(0xf << 4);
+	temp |= (0x6 << 4);
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+	/* Configure INTR based on link status change. */
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_INTR_MASK,
+			~MDIO_REG_INTR_MASK_LINK_STATUS);
+
+	/* Flip the signal detect polarity (set 0x1c.0x1e[8]). */
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_AUTO_DET_MED);
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			&temp);
+	temp |= MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD;
+	bnx2x_cl22_write(bp, phy,
+			MDIO_REG_GPHY_SHADOW,
+			MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+
+	/* Set up fc */
+	/* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
+	bnx2x_calc_ieee_aneg_adv(phy, params, &vars->ieee_fc);
+	fc_val = 0;
+	if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC) ==
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC)
+		fc_val |= MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC;
+
+	if ((vars->ieee_fc & MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) ==
+			MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH)
+		fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE;
+
+	/* Read all advertisement registers */
+	bnx2x_cl22_read(bp, phy,
+			0x09,
+			&an_1000_val);
+
+	bnx2x_cl22_read(bp, phy,
+			0x04,
+			&an_10_100_val);
+
+	bnx2x_cl22_read(bp, phy,
+			MDIO_PMA_REG_CTRL,
+			&autoneg_val);
+
+	/* Disable forced speed */
+	autoneg_val &= ~((1<<6) | (1<<8) | (1<<9) | (1<<12) | (1<<13));
+	an_10_100_val &= ~((1<<5) | (1<<6) | (1<<7) | (1<<8) | (1<<10) |
+			   (1<<11));
+
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+			(phy->speed_cap_mask &
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+			(phy->req_line_speed == SPEED_1000)) {
+		an_1000_val |= (1<<8);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_1000_val |= (1<<9);
+		DP(NETIF_MSG_LINK, "Advertising 1G\n");
+	} else
+		an_1000_val &= ~((1<<8) | (1<<9));
+
+	bnx2x_cl22_write(bp, phy,
+			0x09,
+			an_1000_val);
+	bnx2x_cl22_read(bp, phy,
+			0x09,
+			&an_1000_val);
+
+	/* set 100 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+			(phy->speed_cap_mask &
+			(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
+		an_10_100_val |= (1<<7);
+		/* Enable autoneg and restart autoneg for legacy speeds */
+		autoneg_val |= (1<<9 | 1<<12);
+
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<8);
+		DP(NETIF_MSG_LINK, "Advertising 100M\n");
+	}
+
+	/* set 10 speed advertisement */
+	if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+			(phy->speed_cap_mask &
+			(PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
+			PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
+		an_10_100_val |= (1<<5);
+		autoneg_val |= (1<<9 | 1<<12);
+		if (phy->req_duplex == DUPLEX_FULL)
+			an_10_100_val |= (1<<6);
+		DP(NETIF_MSG_LINK, "Advertising 10M\n");
+	}
+
+	/* Only 10/100 are allowed to work in FORCE mode */
+	if (phy->req_line_speed == SPEED_100) {
+		autoneg_val |= (1<<13);
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl22_write(bp, phy,
+				0x18,
+				(1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 100M force\n");
+	}
+	if (phy->req_line_speed == SPEED_10) {
+		/* Enable AUTO-MDIX when autoneg is disabled */
+		bnx2x_cl22_write(bp, phy,
+				0x18,
+				(1<<15 | 1<<9 | 7<<0));
+		DP(NETIF_MSG_LINK, "Setting 10M force\n");
+	}
+
+	/* Check if we should turn on Auto-GrEEEn */
+	bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &temp);
+	if (temp == MDIO_REG_GPHY_ID_54618SE) {
+		if (params->feature_config_flags &
+		    FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
+			temp = 6;
+			DP(NETIF_MSG_LINK, "Enabling Auto-GrEEEn\n");
+		} else {
+			temp = 0;
+			DP(NETIF_MSG_LINK, "Disabling Auto-GrEEEn\n");
+		}
+		bnx2x_cl22_write(bp, phy,
+				 MDIO_REG_GPHY_CL45_ADDR_REG, MDIO_AN_DEVAD);
+		bnx2x_cl22_write(bp, phy,
+				 MDIO_REG_GPHY_CL45_DATA_REG,
+				 MDIO_REG_GPHY_EEE_ADV);
+		bnx2x_cl22_write(bp, phy,
+				 MDIO_REG_GPHY_CL45_ADDR_REG,
+				 (0x1 << 14) | MDIO_AN_DEVAD);
+		bnx2x_cl22_write(bp, phy,
+				 MDIO_REG_GPHY_CL45_DATA_REG,
+				 temp);
+	}
+
+	bnx2x_cl22_write(bp, phy,
+			0x04,
+			an_10_100_val | fc_val);
+
+	if (phy->req_duplex == DUPLEX_FULL)
+		autoneg_val |= (1<<8);
+
+	bnx2x_cl22_write(bp, phy,
+			MDIO_PMA_REG_CTRL, autoneg_val);
+
+	return 0;
+}
+
+static void bnx2x_54618se_set_link_led(struct bnx2x_phy *phy,
+				       struct link_params *params, u8 mode)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "54618SE set link led (mode=%x)\n", mode);
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+	case LED_MODE_OPER:
+	case LED_MODE_ON:
+	default:
+		break;
+	}
+}
+
+static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
+				     struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port;
+
+	/*
+	 * In case no EPIO is routed to reset the GPHY, put it
+	 * in low power mode.
+	 */
+	bnx2x_cl22_write(bp, phy, MDIO_PMA_REG_CTRL, 0x800);
+	/*
+	 * This works with E3 only, no need to check the chip
+	 * before determining the port.
+	 */
+	port = params->port;
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].e3_cmn_pin_cfg)) &
+			PORT_HW_CFG_E3_PHY_RESET_MASK) >>
+			PORT_HW_CFG_E3_PHY_RESET_SHIFT;
+
+	/* Drive pin low to put GPHY in reset. */
+	bnx2x_set_cfg_pin(bp, cfg_pin, 0);
+}
+
+static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
+				    struct link_params *params,
+				    struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u8 link_up = 0;
+	u16 legacy_status, legacy_speed;
+
+	/* Get speed operation status */
+	bnx2x_cl22_read(bp, phy,
+			0x19,
+			&legacy_status);
+	DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status);
+
+	/* Read status to clear the PHY interrupt. */
+	bnx2x_cl22_read(bp, phy,
+			MDIO_REG_INTR_STATUS,
+			&val);
+
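+	/* Bit 2 of the status indicates the link is up */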
+	link_up = ((legacy_status & (1<<2)) == (1<<2));
+
+	if (link_up) {
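+		/* Bits 10:8 of the status encode speed and duplex */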
+		legacy_speed = (legacy_status & (7<<8));
+		if (legacy_speed == (7<<8)) {
+			vars->line_speed = SPEED_1000;
+			vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (6<<8)) {
+			vars->line_speed = SPEED_1000;
+			vars->duplex = DUPLEX_HALF;
+		} else if (legacy_speed == (5<<8)) {
+			vars->line_speed = SPEED_100;
+			vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (3<<8)) {
+			/* 100Base-T4 (4<<8) is omitted for now */
+			vars->line_speed = SPEED_100;
+			vars->duplex = DUPLEX_HALF;
+		} else if (legacy_speed == (2<<8)) {
+			vars->line_speed = SPEED_10;
+			vars->duplex = DUPLEX_FULL;
+		} else if (legacy_speed == (1<<8)) {
+			vars->line_speed = SPEED_10;
+			vars->duplex = DUPLEX_HALF;
+		} else /* Should not happen */
+			vars->line_speed = 0;
+
+		DP(NETIF_MSG_LINK, "Link is up in %dMbps,"
+			   " is_duplex_full= %d\n", vars->line_speed,
+			   (vars->duplex == DUPLEX_FULL));
+
+		/* Check legacy speed AN resolution */
+		bnx2x_cl22_read(bp, phy,
+				0x01,
+				&val);
+		if (val & (1<<5))
+			vars->link_status |=
+				LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;
+		bnx2x_cl22_read(bp, phy,
+				0x06,
+				&val);
+		if ((val & (1<<0)) == 0)
+			vars->link_status |=
+				LINK_STATUS_PARALLEL_DETECTION_USED;
+
+		DP(NETIF_MSG_LINK, "BCM54618SE: link speed is %d\n",
+			   vars->line_speed);
+
+		/* Report whether EEE is resolved. */
+		bnx2x_cl22_read(bp, phy, MDIO_REG_GPHY_PHYID_LSB, &val);
+		if (val == MDIO_REG_GPHY_ID_54618SE) {
+			if (vars->link_status &
+			    LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)
+				val = 0;
+			else {
+				bnx2x_cl22_write(bp, phy,
+					MDIO_REG_GPHY_CL45_ADDR_REG,
+					MDIO_AN_DEVAD);
+				bnx2x_cl22_write(bp, phy,
+					MDIO_REG_GPHY_CL45_DATA_REG,
+					MDIO_REG_GPHY_EEE_RESOLVED);
+				bnx2x_cl22_write(bp, phy,
+					MDIO_REG_GPHY_CL45_ADDR_REG,
+					(0x1 << 14) | MDIO_AN_DEVAD);
+				bnx2x_cl22_read(bp, phy,
+					MDIO_REG_GPHY_CL45_DATA_REG,
+					&val);
+			}
+			DP(NETIF_MSG_LINK, "EEE resolution: 0x%x\n", val);
+		}
+
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
+
+static void bnx2x_54618se_config_loopback(struct bnx2x_phy *phy,
+					  struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	u16 val;
+	u32 umac_base = params->port ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
+
+	DP(NETIF_MSG_LINK, "2PMA/PMD ext_phy_loopback: 54618se\n");
+
+	/* Enable manual master/slave mode and set to master */
+	/* mii write 9 [bits set 11 12] */
+	bnx2x_cl22_write(bp, phy, 0x09, 3<<11);
+
+	/* forced 1G and disable autoneg */
+	/* set val [mii read 0] */
+	/* set val [expr $val & [bits clear 6 12 13]] */
+	/* set val [expr $val | [bits set 6 8]] */
+	/* mii write 0 $val */
+	bnx2x_cl22_read(bp, phy, 0x00, &val);
+	val &= ~((1<<6) | (1<<12) | (1<<13));
+	val |= (1<<6) | (1<<8);
+	bnx2x_cl22_write(bp, phy, 0x00, val);
+
+	/* Set external loopback and Tx using 6dB coding */
+	/* mii write 0x18 7 */
+	/* set val [mii read 0x18] */
+	/* mii write 0x18 [expr $val | [bits set 10 15]] */
+	bnx2x_cl22_write(bp, phy, 0x18, 7);
+	bnx2x_cl22_read(bp, phy, 0x18, &val);
+	bnx2x_cl22_write(bp, phy, 0x18, val | (1<<10) | (1<<15));
+
+	/* This register opens the gate for the UMAC despite its name */
+	REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + params->port*4, 1);
+
+	/*
+	 * Maximum Frame Length (RW). Defines a 14-bit maximum frame
+	 * length used by the MAC receive logic to check frames. The
+	 * value 0x2710 is 10000 decimal.
+	 */
+	REG_WR(bp, umac_base + UMAC_REG_MAXFR, 0x2710);
+}
+
+/******************************************************************/
+/*			SFX7101 PHY SECTION			  */
+/******************************************************************/
+static void bnx2x_7101_config_loopback(struct bnx2x_phy *phy,
+				       struct link_params *params)
+{
+	struct bnx2x *bp = params->bp;
+	/* SFX7101_XGXS_TEST1 */
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_XS_DEVAD, MDIO_XS_SFX7101_XGXS_TEST1, 0x100);
+}
+
+static int bnx2x_7101_config_init(struct bnx2x_phy *phy,
+				  struct link_params *params,
+				  struct link_vars *vars)
+{
+	u16 fw_ver1, fw_ver2, val;
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Setting the SFX7101 LASI indication\n");
+
+	/* Restore normal power mode*/
+	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_HIGH, params->port);
+	/* HW reset */
+	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy, params);
+
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_LASI_CTRL, 0x1);
+	DP(NETIF_MSG_LINK, "Setting the SFX7101 LED to blink on traffic\n");
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD, MDIO_PMA_REG_7107_LED_CNTL, (1<<3));
+
+	bnx2x_ext_phy_set_pause(params, phy, vars);
+	/* Restart autoneg */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, &val);
+	val |= 0x200;
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, val);
+
+	/* Save spirom version */
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER1, &fw_ver1);
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_7101_VER2, &fw_ver2);
+	bnx2x_save_spirom_version(bp, params->port,
+				  (u32)(fw_ver1<<16 | fw_ver2), phy->ver_addr);
+	return 0;
+}
+
+static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
+				 struct link_params *params,
+				 struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u8 link_up;
+	u16 val1, val2;
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1);
+	DP(NETIF_MSG_LINK, "10G-base-T LASI status 0x%x->0x%x\n",
+		   val2, val1);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val2);
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD, MDIO_PMA_REG_STATUS, &val1);
+	DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n",
+		   val2, val1);
+	link_up = ((val1 & 4) == 4);
+	/* If the link is up, print the AN outcome of the SFX7101 PHY */
+	if (link_up) {
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS,
+				&val2);
+		vars->line_speed = SPEED_10000;
+		vars->duplex = DUPLEX_FULL;
+		DP(NETIF_MSG_LINK, "SFX7101 AN status 0x%x->Master=%x\n",
+			   val2, (val2 & (1<<14)));
+		bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
+		bnx2x_ext_phy_resolve_fc(phy, params, vars);
+	}
+	return link_up;
+}
+
+static int bnx2x_7101_format_ver(u32 spirom_ver, u8 *str, u16 *len)
+{
+	if (*len < 5)
+		return -EINVAL;
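+	/* Copy the raw SPIROM version bytes, least-significant byte first */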
+	str[0] = (spirom_ver & 0xFF);
+	str[1] = (spirom_ver & 0xFF00) >> 8;
+	str[2] = (spirom_ver & 0xFF0000) >> 16;
+	str[3] = (spirom_ver & 0xFF000000) >> 24;
+	str[4] = '\0';
+	*len -= 5;
+	return 0;
+}
+
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy)
+{
+	u16 val, cnt;
+
+	bnx2x_cl45_read(bp, phy,
+			MDIO_PMA_DEVAD,
+			MDIO_PMA_REG_7101_RESET, &val);
+
+	for (cnt = 0; cnt < 10; cnt++) {
+		msleep(50);
+		/* Writes a self-clearing reset */
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_7101_RESET,
+				 (val | (1<<15)));
+		/* Wait for clear */
+		bnx2x_cl45_read(bp, phy,
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_7101_RESET, &val);
+
+		if ((val & (1<<15)) == 0)
+			break;
+	}
+}
+
+static void bnx2x_7101_hw_reset(struct bnx2x_phy *phy,
+				struct link_params *params)
+{
+	/* Low power mode is controlled by GPIO 2 */
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_2,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+	/* The PHY reset is controlled by GPIO 1 */
+	bnx2x_set_gpio(params->bp, MISC_REGISTERS_GPIO_1,
+		       MISC_REGISTERS_GPIO_OUTPUT_LOW, params->port);
+}
+
+static void bnx2x_7101_set_link_led(struct bnx2x_phy *phy,
+				    struct link_params *params, u8 mode)
+{
+	u16 val = 0;
+	struct bnx2x *bp = params->bp;
+	switch (mode) {
+	case LED_MODE_FRONT_PANEL_OFF:
+	case LED_MODE_OFF:
+		val = 2;
+		break;
+	case LED_MODE_ON:
+		val = 1;
+		break;
+	case LED_MODE_OPER:
+		val = 0;
+		break;
+	}
+	bnx2x_cl45_write(bp, phy,
+			 MDIO_PMA_DEVAD,
+			 MDIO_PMA_REG_7107_LINK_LED_CNTL,
+			 val);
+}
+
+/******************************************************************/
+/*			STATIC PHY DECLARATION			  */
+/******************************************************************/
+
+static struct bnx2x_phy phy_null = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN,
+	.addr		= 0,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= 0,
+	.media_type	= ETH_PHY_NOT_PRESENT,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)NULL,
+	.read_status	= (read_status_t)NULL,
+	.link_reset	= (link_reset_t)NULL,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_serdes = {
+	.type		= PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= 0,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_2500baseX_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_xgxs_config_init,
+	.read_status	= (read_status_t)bnx2x_link_settings_status,
+	.link_reset	= (link_reset_t)bnx2x_int_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_xgxs = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= 0,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_2500baseX_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_CX4,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_xgxs_config_init,
+	.read_status	= (read_status_t)bnx2x_link_settings_status,
+	.link_reset	= (link_reset_t)bnx2x_int_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_set_xgxs_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+static struct bnx2x_phy phy_warpcore = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_HW_LOCK_REQUIRED |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			     SUPPORTED_10baseT_Full |
+			     SUPPORTED_100baseT_Half |
+			     SUPPORTED_100baseT_Full |
+			     SUPPORTED_1000baseT_Full |
+			     SUPPORTED_10000baseT_Full |
+			     SUPPORTED_20000baseKR2_Full |
+			     SUPPORTED_20000baseMLD2_Full |
+			     SUPPORTED_FIBRE |
+			     SUPPORTED_Autoneg |
+			     SUPPORTED_Pause |
+			     SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_UNSPECIFIED,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_warpcore_config_init,
+	.read_status	= (read_status_t)bnx2x_warpcore_read_status,
+	.link_reset	= (link_reset_t)bnx2x_warpcore_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_set_warpcore_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)bnx2x_warpcore_hw_reset,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_7101 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_7101_config_init,
+	.read_status	= (read_status_t)bnx2x_7101_read_status,
+	.link_reset	= (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_7101_config_loopback,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_7101_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_7101_hw_reset,
+	.set_link_led	= (set_link_led_t)bnx2x_7101_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8073 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_HW_LOCK_REQUIRED,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_2500baseX_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_KR,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8073_config_init,
+	.read_status	= (read_status_t)bnx2x_8073_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8073_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8705 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_XFP_FIBER,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8705_config_init,
+	.read_status	= (read_status_t)bnx2x_8705_read_status,
+	.link_reset	= (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_null_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8706 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_INIT_XGXS_FIRST |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_SFP_FIBER,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8706_config_init,
+	.read_status	= (read_status_t)bnx2x_8706_read_status,
+	.link_reset	= (link_reset_t)bnx2x_common_ext_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8726 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_HW_LOCK_REQUIRED |
+			   FLAGS_INIT_XGXS_FIRST |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_NOT_PRESENT,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8726_config_init,
+	.read_status	= (read_status_t)bnx2x_8726_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8726_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_8726_config_loopback,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)NULL,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_8727 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= (FLAGS_FAN_FAILURE_DET_REQ |
+			   FLAGS_TX_ERROR_CHECK),
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10000baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_FIBRE |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_NOT_PRESENT,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8727_config_init,
+	.read_status	= (read_status_t)bnx2x_8727_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8727_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_8727_hw_reset,
+	.set_link_led	= (set_link_led_t)bnx2x_8727_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)bnx2x_8727_specific_func
+};
+
+static struct bnx2x_phy phy_8481 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			  FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_8481_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_8481_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_8481_hw_reset,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84823 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			  FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_84833 = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_FAN_FAILURE_DET_REQ |
+			    FLAGS_REARM_LATCH_SIGNAL,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_10000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_848x3_config_init,
+	.read_status	= (read_status_t)bnx2x_848xx_read_status,
+	.link_reset	= (link_reset_t)bnx2x_848x3_link_reset,
+	.config_loopback = (config_loopback_t)NULL,
+	.format_fw_ver	= (format_fw_ver_t)bnx2x_848xx_format_ver,
+	.hw_reset	= (hw_reset_t)bnx2x_84833_hw_reset_phy,
+	.set_link_led	= (set_link_led_t)bnx2x_848xx_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+
+static struct bnx2x_phy phy_54618se = {
+	.type		= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE,
+	.addr		= 0xff,
+	.def_md_devad	= 0,
+	.flags		= FLAGS_INIT_XGXS_FIRST,
+	.rx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.tx_preemphasis	= {0xffff, 0xffff, 0xffff, 0xffff},
+	.mdio_ctrl	= 0,
+	.supported	= (SUPPORTED_10baseT_Half |
+			   SUPPORTED_10baseT_Full |
+			   SUPPORTED_100baseT_Half |
+			   SUPPORTED_100baseT_Full |
+			   SUPPORTED_1000baseT_Full |
+			   SUPPORTED_TP |
+			   SUPPORTED_Autoneg |
+			   SUPPORTED_Pause |
+			   SUPPORTED_Asym_Pause),
+	.media_type	= ETH_PHY_BASE_T,
+	.ver_addr	= 0,
+	.req_flow_ctrl	= 0,
+	.req_line_speed	= 0,
+	.speed_cap_mask	= 0,
+	.req_duplex	= 0,
+	.rsrv		= 0,
+	.config_init	= (config_init_t)bnx2x_54618se_config_init,
+	.read_status	= (read_status_t)bnx2x_54618se_read_status,
+	.link_reset	= (link_reset_t)bnx2x_54618se_link_reset,
+	.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
+	.format_fw_ver	= (format_fw_ver_t)NULL,
+	.hw_reset	= (hw_reset_t)NULL,
+	.set_link_led	= (set_link_led_t)bnx2x_54618se_set_link_led,
+	.phy_specific_func = (phy_specific_func_t)NULL
+};
+/*****************************************************************/
+/*                                                               */
+/* Populate the phy according to the phy type.                   */
+/* Main function: bnx2x_populate_phy                             */
+/*                                                               */
+/*****************************************************************/
+
+static void bnx2x_populate_preemphasis(struct bnx2x *bp, u32 shmem_base,
+				     struct bnx2x_phy *phy, u8 port,
+				     u8 phy_index)
+{
+	/* Get the 4 lanes xgxs config rx and tx */
+	u32 rx = 0, tx = 0, i;
+	for (i = 0; i < 2; i++) {
+		/*
+		 * INT_PHY and EXT_PHY1 share the same value location in the
+		 * shmem. When num_phys is greater than 1, this value
+		 * applies only to EXT_PHY1.
+		 */
+		if (phy_index == INT_PHY || phy_index == EXT_PHY1) {
+			rx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].xgxs_config_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			  dev_info.port_hw_config[port].xgxs_config_tx[i<<1]));
+		} else {
+			rx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].xgxs_config2_rx[i<<1]));
+
+			tx = REG_RD(bp, shmem_base +
+				    offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].xgxs_config2_tx[i<<1]));
+		}
+
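+		/*
+		 * Each 32-bit shmem word packs two 16-bit lane settings:
+		 * the high half maps to lane (i << 1) and the low half to
+		 * lane (i << 1) + 1, e.g. rx = 0xAAAABBBB yields
+		 * rx_preemphasis[0] = 0xAAAA and rx_preemphasis[1] = 0xBBBB
+		 * on the first iteration
+		 */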
+		phy->rx_preemphasis[i << 1] = ((rx>>16) & 0xffff);
+		phy->rx_preemphasis[(i << 1) + 1] = (rx & 0xffff);
+
+		phy->tx_preemphasis[i << 1] = ((tx>>16) & 0xffff);
+		phy->tx_preemphasis[(i << 1) + 1] = (tx & 0xffff);
+	}
+}
+
+static u32 bnx2x_get_ext_phy_config(struct bnx2x *bp, u32 shmem_base,
+				    u8 phy_index, u8 port)
+{
+	u32 ext_phy_config = 0;
+	switch (phy_index) {
+	case EXT_PHY1:
+		ext_phy_config = REG_RD(bp, shmem_base +
+					      offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config));
+		break;
+	case EXT_PHY2:
+		ext_phy_config = REG_RD(bp, shmem_base +
+					      offsetof(struct shmem_region,
+			dev_info.port_hw_config[port].external_phy_config2));
+		break;
+	default:
+		DP(NETIF_MSG_LINK, "Invalid phy_index %d\n", phy_index);
+		return -EINVAL;
+	}
+
+	return ext_phy_config;
+}
+static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
+				  struct bnx2x_phy *phy)
+{
+	u32 phy_addr;
+	u32 chip_id;
+	u32 switch_cfg = (REG_RD(bp, shmem_base +
+				       offsetof(struct shmem_region,
+			dev_info.port_feature_config[port].link_config)) &
+			  PORT_FEATURE_CONNECTED_SWITCH_MASK);
+	chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+	DP(NETIF_MSG_LINK, "chip_id = 0x%x\n", chip_id);
+	if (USES_WARPCORE(bp)) {
+		u32 serdes_net_if;
+		phy_addr = REG_RD(bp,
+				  MISC_REG_WC0_CTRL_PHY_ADDR);
+		*phy = phy_warpcore;
+		if (REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR) == 0x3)
+			phy->flags |= FLAGS_4_PORT_MODE;
+		else
+			phy->flags &= ~FLAGS_4_PORT_MODE;
+		/* Check Dual mode */
+		serdes_net_if = (REG_RD(bp, shmem_base +
+					offsetof(struct shmem_region, dev_info.
+					port_hw_config[port].default_cfg)) &
+				 PORT_HW_CFG_NET_SERDES_IF_MASK);
+		/*
+		 * Set the appropriate supported and flags indications per
+		 * interface type of the chip
+		 */
+		switch (serdes_net_if) {
+		case PORT_HW_CFG_NET_SERDES_IF_SGMII:
+			phy->supported &= (SUPPORTED_10baseT_Half |
+					   SUPPORTED_10baseT_Full |
+					   SUPPORTED_100baseT_Half |
+					   SUPPORTED_100baseT_Full |
+					   SUPPORTED_1000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Autoneg |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->media_type = ETH_PHY_BASE_T;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_XFI:
+			phy->media_type = ETH_PHY_XFP_FIBER;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_SFI:
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			phy->media_type = ETH_PHY_SFP_FIBER;
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_KR:
+			phy->media_type = ETH_PHY_KR;
+			phy->supported &= (SUPPORTED_1000baseT_Full |
+					   SUPPORTED_10000baseT_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Autoneg |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_DXGXS:
+			phy->media_type = ETH_PHY_KR;
+			phy->flags |= FLAGS_WC_DUAL_MODE;
+			phy->supported &= (SUPPORTED_20000baseMLD2_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		case PORT_HW_CFG_NET_SERDES_IF_KR2:
+			phy->media_type = ETH_PHY_KR;
+			phy->flags |= FLAGS_WC_DUAL_MODE;
+			phy->supported &= (SUPPORTED_20000baseKR2_Full |
+					   SUPPORTED_FIBRE |
+					   SUPPORTED_Pause |
+					   SUPPORTED_Asym_Pause);
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Unknown WC interface type 0x%x\n",
+				       serdes_net_if);
+			break;
+		}
+
+		/*
+		 * Enable MDC/MDIO work-around for E3 A0 since free running MDC
+		 * was not set as expected. For B0, ECO will be enabled so there
+		 * won't be an issue there
+		 */
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			phy->flags |= FLAGS_MDC_MDIO_WA;
+		else
+			phy->flags |= FLAGS_MDC_MDIO_WA_B0;
+	} else {
+		switch (switch_cfg) {
+		case SWITCH_CFG_1G:
+			phy_addr = REG_RD(bp,
+					  NIG_REG_SERDES0_CTRL_PHY_ADDR +
+					  port * 0x10);
+			*phy = phy_serdes;
+			break;
+		case SWITCH_CFG_10G:
+			phy_addr = REG_RD(bp,
+					  NIG_REG_XGXS0_CTRL_PHY_ADDR +
+					  port * 0x18);
+			*phy = phy_xgxs;
+			break;
+		default:
+			DP(NETIF_MSG_LINK, "Invalid switch_cfg\n");
+			return -EINVAL;
+		}
+	}
+	phy->addr = (u8)phy_addr;
+	phy->mdio_ctrl = bnx2x_get_emac_base(bp,
+					    SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH,
+					    port);
+	if (CHIP_IS_E2(bp))
+		phy->def_md_devad = E2_DEFAULT_PHY_DEV_ADDR;
+	else
+		phy->def_md_devad = DEFAULT_PHY_DEV_ADDR;
+
+	DP(NETIF_MSG_LINK, "Internal phy port=%d, addr=0x%x, mdio_ctl=0x%x\n",
+		   port, phy->addr, phy->mdio_ctrl);
+
+	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, INT_PHY);
+	return 0;
+}
+
+static int bnx2x_populate_ext_phy(struct bnx2x *bp,
+				  u8 phy_index,
+				  u32 shmem_base,
+				  u32 shmem2_base,
+				  u8 port,
+				  struct bnx2x_phy *phy)
+{
+	u32 ext_phy_config, phy_type, config2;
+	u32 mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH;
+	ext_phy_config = bnx2x_get_ext_phy_config(bp, shmem_base,
+						  phy_index, port);
+	phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+	/* Select the phy type */
+	switch (phy_type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_SWAPPED;
+		*phy = phy_8073;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
+		*phy = phy_8705;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
+		*phy = phy_8706;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8726;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		/* BCM8727_NOC => BCM8727 no over current */
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		phy->flags |= FLAGS_NOC;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+		mdc_mdio_access = SHARED_HW_CFG_MDC_MDIO_ACCESS1_EMAC1;
+		*phy = phy_8727;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
+		*phy = phy_8481;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
+		*phy = phy_84823;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		*phy = phy_84833;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE:
+		*phy = phy_54618se;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
+		*phy = phy_7101;
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		*phy = phy_null;
+		return -EINVAL;
+	default:
+		*phy = phy_null;
+		return 0;
+	}
+
+	phy->addr = XGXS_EXT_PHY_ADDR(ext_phy_config);
+	bnx2x_populate_preemphasis(bp, shmem_base, phy, port, phy_index);
+
+	/*
+	 * The shmem address of the phy version is located in different
+	 * structures. In case this structure is too old, do not set
+	 * the address.
+	 */
+	config2 = REG_RD(bp, shmem_base + offsetof(struct shmem_region,
+					dev_info.shared_hw_config.config2));
+	if (phy_index == EXT_PHY1) {
+		phy->ver_addr = shmem_base + offsetof(struct shmem_region,
+				port_mb[port].ext_phy_fw_version);
+
+		/* Check specific mdc mdio settings */
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK)
+			mdc_mdio_access = config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS1_MASK;
+	} else {
+		u32 size = REG_RD(bp, shmem2_base);
+
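+		/*
+		 * The first word of shmem2 holds the size of the region;
+		 * use it to verify that ext_phy_fw_version2 exists in this
+		 * firmware's layout (assumption based on the size check)
+		 */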
+		if (size >
+		    offsetof(struct shmem2_region, ext_phy_fw_version2)) {
+			phy->ver_addr = shmem2_base +
+			    offsetof(struct shmem2_region,
+				     ext_phy_fw_version2[port]);
+		}
+		/* Check specific mdc mdio settings */
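+		/*
+		 * The ACCESS2 field is shifted down to the ACCESS1 bit
+		 * positions, since the same mdc_mdio_access value is later
+		 * handed to bnx2x_get_emac_base()
+		 */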
+		if (config2 & SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK)
+			mdc_mdio_access = (config2 &
+			SHARED_HW_CFG_MDC_MDIO_ACCESS2_MASK) >>
+			(SHARED_HW_CFG_MDC_MDIO_ACCESS2_SHIFT -
+			 SHARED_HW_CFG_MDC_MDIO_ACCESS1_SHIFT);
+	}
+	phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+
+	/*
+	 * In case the mdc/mdio access of the external phy is different from
+	 * the mdc/mdio access of the XGXS, a HW lock must be taken in each
+	 * access to prevent one port from interfering with another port's
+	 * CL45 operations.
+	 */
+	if (mdc_mdio_access != SHARED_HW_CFG_MDC_MDIO_ACCESS1_BOTH)
+		phy->flags |= FLAGS_HW_LOCK_REQUIRED;
+	DP(NETIF_MSG_LINK, "phy_type 0x%x port %d found in index %d\n",
+		   phy_type, port, phy_index);
+	DP(NETIF_MSG_LINK, "             addr=0x%x, mdio_ctl=0x%x\n",
+		   phy->addr, phy->mdio_ctrl);
+	return 0;
+}
+
+static int bnx2x_populate_phy(struct bnx2x *bp, u8 phy_index, u32 shmem_base,
+			      u32 shmem2_base, u8 port, struct bnx2x_phy *phy)
+{
+	int status = 0;
+	phy->type = PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN;
+	if (phy_index == INT_PHY)
+		return bnx2x_populate_int_phy(bp, shmem_base, port, phy);
+	status = bnx2x_populate_ext_phy(bp, phy_index, shmem_base, shmem2_base,
+					port, phy);
+	return status;
+}
+
+static void bnx2x_phy_def_cfg(struct link_params *params,
+			      struct bnx2x_phy *phy,
+			      u8 phy_index)
+{
+	struct bnx2x *bp = params->bp;
+	u32 link_config;
+	/* Populate the default phy configuration for MF mode */
+	if (phy_index == EXT_PHY2) {
+		link_config = REG_RD(bp, params->shmem_base +
+				     offsetof(struct shmem_region, dev_info.
+			port_feature_config[params->port].link_config2));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+					     offsetof(struct shmem_region,
+						      dev_info.
+			port_hw_config[params->port].speed_capability_mask2));
+	} else {
+		link_config = REG_RD(bp, params->shmem_base +
+				     offsetof(struct shmem_region, dev_info.
+				port_feature_config[params->port].link_config));
+		phy->speed_cap_mask = REG_RD(bp, params->shmem_base +
+					     offsetof(struct shmem_region,
+						      dev_info.
+			port_hw_config[params->port].speed_capability_mask));
+	}
+	DP(NETIF_MSG_LINK, "Default config phy idx %x cfg 0x%x speed_cap_mask"
+		       " 0x%x\n", phy_index, link_config, phy->speed_cap_mask);
+
+	phy->req_duplex = DUPLEX_FULL;
+	switch (link_config  & PORT_FEATURE_LINK_SPEED_MASK) {
+	case PORT_FEATURE_LINK_SPEED_10M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
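+		/* fall through */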
+	case PORT_FEATURE_LINK_SPEED_10M_FULL:
+		phy->req_line_speed = SPEED_10;
+		break;
+	case PORT_FEATURE_LINK_SPEED_100M_HALF:
+		phy->req_duplex = DUPLEX_HALF;
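+		/* fall through */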
+	case PORT_FEATURE_LINK_SPEED_100M_FULL:
+		phy->req_line_speed = SPEED_100;
+		break;
+	case PORT_FEATURE_LINK_SPEED_1G:
+		phy->req_line_speed = SPEED_1000;
+		break;
+	case PORT_FEATURE_LINK_SPEED_2_5G:
+		phy->req_line_speed = SPEED_2500;
+		break;
+	case PORT_FEATURE_LINK_SPEED_10G_CX4:
+		phy->req_line_speed = SPEED_10000;
+		break;
+	default:
+		phy->req_line_speed = SPEED_AUTO_NEG;
+		break;
+	}
+
+	switch (link_config  & PORT_FEATURE_FLOW_CONTROL_MASK) {
+	case PORT_FEATURE_FLOW_CONTROL_AUTO:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_TX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_TX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_RX:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_RX;
+		break;
+	case PORT_FEATURE_FLOW_CONTROL_BOTH:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_BOTH;
+		break;
+	default:
+		phy->req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+		break;
+	}
+}
+
+u32 bnx2x_phy_selection(struct link_params *params)
+{
+	u32 phy_config_swapped, prio_cfg;
+	u32 return_cfg = PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT;
+
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	prio_cfg = params->multi_phy_config &
+			PORT_HW_CFG_PHY_SELECTION_MASK;
+
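+	/*
+	 * When the PHY positions are swapped, the configured priority still
+	 * refers to the logical PHY, so translate it to the other physical
+	 * PHY (e.g. FIRST_PHY_PRIORITY becomes SECOND_PHY_PRIORITY)
+	 */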
+	if (phy_config_swapped) {
+		switch (prio_cfg) {
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY;
+		     break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY;
+		     break;
+		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_FIRST_PHY;
+		     break;
+		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
+		     return_cfg = PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
+		     break;
+		}
+	} else
+		return_cfg = prio_cfg;
+
+	return return_cfg;
+}
+
+
+int bnx2x_phy_probe(struct link_params *params)
+{
+	u8 phy_index, actual_phy_idx, link_cfg_idx;
+	u32 phy_config_swapped, sync_offset, media_types;
+	struct bnx2x *bp = params->bp;
+	struct bnx2x_phy *phy;
+	params->num_phys = 0;
+	DP(NETIF_MSG_LINK, "Begin phy probe\n");
+	phy_config_swapped = params->multi_phy_config &
+		PORT_HW_CFG_PHY_SWAPPED_ENABLED;
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		link_cfg_idx = LINK_CONFIG_IDX(phy_index);
+		actual_phy_idx = phy_index;
+		if (phy_config_swapped) {
+			if (phy_index == EXT_PHY1)
+				actual_phy_idx = EXT_PHY2;
+			else if (phy_index == EXT_PHY2)
+				actual_phy_idx = EXT_PHY1;
+		}
+		DP(NETIF_MSG_LINK, "phy_config_swapped %x, phy_index %x,"
+			       " actual_phy_idx %x\n", phy_config_swapped,
+			   phy_index, actual_phy_idx);
+		phy = &params->phy[actual_phy_idx];
+		if (bnx2x_populate_phy(bp, phy_index, params->shmem_base,
+				       params->shmem2_base, params->port,
+				       phy) != 0) {
+			params->num_phys = 0;
+			DP(NETIF_MSG_LINK, "phy probe failed in phy index %d\n",
+				   phy_index);
+			for (phy_index = INT_PHY;
+			      phy_index < MAX_PHYS;
+			      phy_index++)
+				params->phy[phy_index] = phy_null;
+			return -EINVAL;
+		}
+		if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)
+			break;
+
+		sync_offset = params->shmem_base +
+			offsetof(struct shmem_region,
+			dev_info.port_hw_config[params->port].media_type);
+		media_types = REG_RD(bp, sync_offset);
+
+		/*
+		 * Update media type for non-PMF sync only for the first time,
+		 * i.e. while this PHY's field inside the media_type word is
+		 * still unset. In case the media type changes afterwards, it
+		 * will be updated using the update_status function.
+		 */
+		if ((media_types & (PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK <<
+				    (PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				     actual_phy_idx))) == 0) {
+			media_types |= ((phy->media_type &
+					PORT_HW_CFG_MEDIA_TYPE_PHY0_MASK) <<
+				(PORT_HW_CFG_MEDIA_TYPE_PHY1_SHIFT *
+				 actual_phy_idx));
+		}
+		REG_WR(bp, sync_offset, media_types);
+
+		bnx2x_phy_def_cfg(params, phy, phy_index);
+		params->num_phys++;
+	}
+
+	DP(NETIF_MSG_LINK, "End phy probe. #phys found %x\n", params->num_phys);
+	return 0;
+}
+
+void bnx2x_init_bmac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_10000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_BMAC;
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+
+	bnx2x_xgxs_deassert(params);
+
+	/* set bmac loopback */
+	bnx2x_bmac_enable(params, vars, 1);
+
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_emac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_EMAC;
+
+	vars->phy_flags = PHY_XGXS_FLAG;
+
+	bnx2x_xgxs_deassert(params);
+	/* set emac loopback */
+	bnx2x_emac_enable(params, vars, 1);
+	bnx2x_emac_program(params, vars);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xmac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	if (!params->req_line_speed[0])
+		vars->line_speed = SPEED_10000;
+	else
+		vars->line_speed = params->req_line_speed[0];
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_XMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	/*
+	 * Set WC to loopback mode since link is required to provide clock
+	 * to the XMAC in 20G mode
+	 */
+	bnx2x_set_aer_mmd(params, &params->phy[0]);
+	bnx2x_warpcore_reset_lane(bp, &params->phy[0], 0);
+	params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+
+	bnx2x_xmac_enable(params, vars, 1);
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_umac_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->line_speed = SPEED_1000;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_UMAC;
+	vars->phy_flags = PHY_XGXS_FLAG;
+	bnx2x_umac_enable(params, vars, 1);
+
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+}
+
+void bnx2x_init_xgxs_loopback(struct link_params *params,
+			      struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	vars->link_up = 1;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->duplex = DUPLEX_FULL;
+	if (params->req_line_speed[0] == SPEED_1000)
+		vars->line_speed = SPEED_1000;
+	else
+		vars->line_speed = SPEED_10000;
+
+	if (!USES_WARPCORE(bp))
+		bnx2x_xgxs_deassert(params);
+	bnx2x_link_initialize(params, vars);
+
+	if (params->req_line_speed[0] == SPEED_1000) {
+		if (USES_WARPCORE(bp))
+			bnx2x_umac_enable(params, vars, 0);
+		else {
+			bnx2x_emac_program(params, vars);
+			bnx2x_emac_enable(params, vars, 0);
+		}
+	} else {
+		if (USES_WARPCORE(bp))
+			bnx2x_xmac_enable(params, vars, 0);
+		else
+			bnx2x_bmac_enable(params, vars, 0);
+	}
+
+	if (params->loopback_mode == LOOPBACK_XGXS) {
+		/* set 10G XGXS loopback */
+		params->phy[INT_PHY].config_loopback(
+			&params->phy[INT_PHY],
+			params);
+	} else {
+		/* set external phy loopback */
+		u8 phy_index;
+		for (phy_index = EXT_PHY1;
+		      phy_index < params->num_phys; phy_index++) {
+			if (params->phy[phy_index].config_loopback)
+				params->phy[phy_index].config_loopback(
+					&params->phy[phy_index],
+					params);
+		}
+	}
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0);
+
+	bnx2x_set_led(params, vars, LED_MODE_OPER, vars->line_speed);
+}
+
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	DP(NETIF_MSG_LINK, "Phy Initialization started\n");
+	DP(NETIF_MSG_LINK, "(1) req_speed %d, req_flowctrl %d\n",
+		   params->req_line_speed[0], params->req_flow_ctrl[0]);
+	DP(NETIF_MSG_LINK, "(2) req_speed %d, req_flowctrl %d\n",
+		   params->req_line_speed[1], params->req_flow_ctrl[1]);
+	vars->link_status = 0;
+	vars->phy_link_up = 0;
+	vars->link_up = 0;
+	vars->line_speed = 0;
+	vars->duplex = DUPLEX_FULL;
+	vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
+	vars->mac_type = MAC_TYPE_NONE;
+	vars->phy_flags = 0;
+
+	/* disable attentions */
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	bnx2x_emac_init(params, vars);
+
+	if (params->num_phys == 0) {
+		DP(NETIF_MSG_LINK, "No phy found for initialization !!\n");
+		return -EINVAL;
+	}
+	set_phy_vars(params, vars);
+
+	DP(NETIF_MSG_LINK, "Num of phys on board: %d\n", params->num_phys);
+	switch (params->loopback_mode) {
+	case LOOPBACK_BMAC:
+		bnx2x_init_bmac_loopback(params, vars);
+		break;
+	case LOOPBACK_EMAC:
+		bnx2x_init_emac_loopback(params, vars);
+		break;
+	case LOOPBACK_XMAC:
+		bnx2x_init_xmac_loopback(params, vars);
+		break;
+	case LOOPBACK_UMAC:
+		bnx2x_init_umac_loopback(params, vars);
+		break;
+	case LOOPBACK_XGXS:
+	case LOOPBACK_EXT_PHY:
+		bnx2x_init_xgxs_loopback(params, vars);
+		break;
+	default:
+		if (!CHIP_IS_E3(bp)) {
+			if (params->switch_cfg == SWITCH_CFG_10G)
+				bnx2x_xgxs_deassert(params);
+			else
+				bnx2x_serdes_deassert(bp, params->port);
+		}
+		bnx2x_link_initialize(params, vars);
+		msleep(30);
+		bnx2x_link_int_enable(params);
+		break;
+	}
+	return 0;
+}
+
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+		     u8 reset_ext_phy)
+{
+	struct bnx2x *bp = params->bp;
+	u8 phy_index, port = params->port, clear_latch_ind = 0;
+	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
+	/* disable attentions */
+	vars->link_status = 0;
+	bnx2x_update_mng(params, vars->link_status);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	/* activate nig drain */
+	REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
+
+	/* disable nig egress interface */
+	if (!CHIP_IS_E3(bp)) {
+		REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
+	}
+
+	/* Stop BigMac rx */
+	if (!CHIP_IS_E3(bp))
+		bnx2x_bmac_rx_disable(bp, port);
+	else
+		bnx2x_xmac_disable(params);
+	/* disable emac */
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0);
+
+	msleep(10);
+	/*
+	 * The PHY reset is controlled by GPIO 1.
+	 * Hold it low during the reset.
+	 */
+	/* clear link led */
+	bnx2x_set_led(params, vars, LED_MODE_OFF, 0);
+
+	if (reset_ext_phy) {
+		bnx2x_set_mdio_clk(bp, params->chip_id, port);
+		for (phy_index = EXT_PHY1; phy_index < params->num_phys;
+		      phy_index++) {
+			if (params->phy[phy_index].link_reset) {
+				bnx2x_set_aer_mmd(params,
+						  &params->phy[phy_index]);
+				params->phy[phy_index].link_reset(
+					&params->phy[phy_index],
+					params);
+			}
+			if (params->phy[phy_index].flags &
+			    FLAGS_REARM_LATCH_SIGNAL)
+				clear_latch_ind = 1;
+		}
+	}
+
+	if (clear_latch_ind) {
+		/* Clear latching indication */
+		bnx2x_rearm_latch_signal(bp, port, 0);
+		bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+			       1 << NIG_LATCH_BC_ENABLE_MI_INT);
+	}
+	if (params->phy[INT_PHY].link_reset)
+		params->phy[INT_PHY].link_reset(
+			&params->phy[INT_PHY], params);
+	/* reset BigMac */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
+
+	/* disable nig ingress interface */
+	if (!CHIP_IS_E3(bp)) {
+		REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0);
+		REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0);
+	}
+	vars->link_up = 0;
+	vars->phy_flags = 0;
+	return 0;
+}
+
+/****************************************************************************/
+/*				Common function				    */
+/****************************************************************************/
+static int bnx2x_8073_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
+	u16 val;
+	s8 port = 0;
+	s8 port_of_path = 0;
+	u32 swap_val, swap_override;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
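+	/*
+	 * Flip the port only when both the port-swap strap and its override
+	 * are set (note the logical &&: the raw register values are treated
+	 * as booleans here)
+	 */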
+	port ^= (swap_val && swap_override);
+	bnx2x_ext_phy_hw_reset(bp, port);
+	/* PART1 - Reset both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		u32 shmem_base, shmem2_base;
+		/* In E2, the same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
+		}
+
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate_phy failed\n");
+			return -EINVAL;
+		}
+		/* disable attentions */
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
+			       (NIG_MASK_XGXS0_LINK_STATUS |
+				NIG_MASK_XGXS0_LINK10G |
+				NIG_MASK_SERDES0_LINK_STATUS |
+				NIG_MASK_MI_INT));
+
+		/*
+		 * Need to take the phy out of low power mode in order
+		 * to access its registers
+		 */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+			       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+			       port);
+
+		/* Reset the phy */
+		bnx2x_cl45_write(bp, &phy[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_CTRL,
+				 1<<15);
+	}
+
+	/* Add delay of 150ms after reset */
+	msleep(150);
+
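+	/*
+	 * An odd MDIO address on the port 0 PHY appears to indicate swapped
+	 * wiring, in which case the per-port descriptors are exchanged
+	 * (assumption inferred from the addr & 0x1 test below)
+	 */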
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
+
+	/* PART2 - Download firmware to both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		if (CHIP_IS_E1x(bp))
+			port_of_path = port;
+		else
+			port_of_path = 0;
+
+		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+			   phy_blk[port]->addr);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
+			return -EINVAL;
+
+		/* Only set bit 10 = 1 (Tx power down) */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+		/* Phase1 of TX_POWER_DOWN reset */
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_POWER_DOWN,
+				 (val | 1<<10));
+	}
+
+	/*
+	 * Toggle Transmitter: Power down and then up with 600ms delay
+	 * between
+	 */
+	msleep(600);
+
+	/* PART3 - complete TX_POWER_DOWN process, and set GPIO2 back to low */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		/* Phase2 of POWER_DOWN_RESET */
+		/* Release bit 10 (Release Tx power down) */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, &val);
+
+		bnx2x_cl45_write(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10))));
+		msleep(15);
+
+		/* Read-modify-write the SPI-ROM version select register */
+		bnx2x_cl45_read(bp, phy_blk[port],
+				MDIO_PMA_DEVAD,
+				MDIO_PMA_REG_EDC_FFE_MAIN, &val);
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_EDC_FFE_MAIN, (val | (1<<12)));
+
+		/* set GPIO2 back to LOW */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
+			       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
+	}
+	return 0;
+}
+static int bnx2x_8726_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	u32 val;
+	s8 port;
+	struct bnx2x_phy phy;
+	/* Use port1 because of the static port-swap */
+	/* Enable the module detection interrupt */
+	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+	val |= ((1<<MISC_REGISTERS_GPIO_3)|
+		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
+	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+
+	bnx2x_ext_phy_hw_reset(bp, 0);
+	msleep(5);
+	for (port = 0; port < PORT_MAX; port++) {
+		u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+		}
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy) !=
+		    0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
+
+		/* Reset phy */
+		bnx2x_cl45_write(bp, &phy,
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, 0x0001);
+
+
+		/* Set fault module detected LED on */
+		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+			       MISC_REGISTERS_GPIO_HIGH,
+			       port);
+	}
+
+	return 0;
+}
+static void bnx2x_get_ext_phy_reset_gpio(struct bnx2x *bp, u32 shmem_base,
+					 u8 *io_gpio, u8 *io_port)
+{
+
+	u32 phy_gpio_reset = REG_RD(bp, shmem_base +
+					  offsetof(struct shmem_region,
+				dev_info.port_hw_config[PORT_0].default_cfg));
+	switch (phy_gpio_reset) {
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P0:
+		*io_gpio = 0;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P0:
+		*io_gpio = 1;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P0:
+		*io_gpio = 2;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P0:
+		*io_gpio = 3;
+		*io_port = 0;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO0_P1:
+		*io_gpio = 0;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO1_P1:
+		*io_gpio = 1;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO2_P1:
+		*io_gpio = 2;
+		*io_port = 1;
+		break;
+	case PORT_HW_CFG_EXT_PHY_GPIO_RST_GPIO3_P1:
+		*io_gpio = 3;
+		*io_port = 1;
+		break;
+	default:
+		/* Don't override the io_gpio and io_port */
+		break;
+	}
+}
+
+static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
+				      u32 shmem_base_path[],
+				      u32 shmem2_base_path[], u8 phy_index,
+				      u32 chip_id)
+{
+	s8 port, reset_gpio;
+	u32 swap_val, swap_override;
+	struct bnx2x_phy phy[PORT_MAX];
+	struct bnx2x_phy *phy_blk[PORT_MAX];
+	s8 port_of_path;
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+
+	reset_gpio = MISC_REGISTERS_GPIO_1;
+	port = 1;
+
+	/*
+	 * Retrieve the reset gpio/port which controls the reset.
+	 * Default is GPIO1, PORT1.
+	 */
+	bnx2x_get_ext_phy_reset_gpio(bp, shmem_base_path[0],
+				     (u8 *)&reset_gpio, (u8 *)&port);
+
+	/* Calculate the port based on port swap */
+	port ^= (swap_val && swap_override);
+
+	/* Initiate PHY reset*/
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW,
+		       port);
+	msleep(1);
+	bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH,
+		       port);
+
+	msleep(5);
+
+	/* PART1 - Reset both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		u32 shmem_base, shmem2_base;
+
+		/* In E2, the same phy is used for port0 of the two paths */
+		if (CHIP_IS_E1x(bp)) {
+			shmem_base = shmem_base_path[0];
+			shmem2_base = shmem2_base_path[0];
+			port_of_path = port;
+		} else {
+			shmem_base = shmem_base_path[port];
+			shmem2_base = shmem2_base_path[port];
+			port_of_path = 0;
+		}
+
+		/* Extract the ext phy address for the port */
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port_of_path, &phy[port]) !=
+				       0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return -EINVAL;
+		}
+		/* disable attentions */
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
+			       (NIG_MASK_XGXS0_LINK_STATUS |
+				NIG_MASK_XGXS0_LINK10G |
+				NIG_MASK_SERDES0_LINK_STATUS |
+				NIG_MASK_MI_INT));
+
+
+		/* Reset the phy */
+		bnx2x_cl45_write(bp, &phy[port],
+				 MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
+	}
+
+	/* Add delay of 150ms after reset */
+	msleep(150);
+	if (phy[PORT_0].addr & 0x1) {
+		phy_blk[PORT_0] = &(phy[PORT_1]);
+		phy_blk[PORT_1] = &(phy[PORT_0]);
+	} else {
+		phy_blk[PORT_0] = &(phy[PORT_0]);
+		phy_blk[PORT_1] = &(phy[PORT_1]);
+	}
+	/* PART2 - Download firmware to both phys */
+	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
+		if (CHIP_IS_E1x(bp))
+			port_of_path = port;
+		else
+			port_of_path = 0;
+		DP(NETIF_MSG_LINK, "Loading spirom for phy address 0x%x\n",
+			   phy_blk[port]->addr);
+		if (bnx2x_8073_8727_external_rom_boot(bp, phy_blk[port],
+						      port_of_path))
+			return -EINVAL;
+		/* Disable PHY transmitter output */
+		bnx2x_cl45_write(bp, phy_blk[port],
+				 MDIO_PMA_DEVAD,
+				 MDIO_PMA_REG_TX_DISABLE, 1);
+
+	}
+	return 0;
+}
+
+static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
+				     u32 shmem2_base_path[], u8 phy_index,
+				     u32 ext_phy_type, u32 chip_id)
+{
+	int rc = 0;
+
+	switch (ext_phy_type) {
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
+		rc = bnx2x_8073_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8722:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC:
+		rc = bnx2x_8727_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
+		/*
+		 * GPIO1 affects both ports, so there's no need to pull
+		 * it for each port separately
+		 */
+		rc = bnx2x_8726_common_init_phy(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+		/*
+		 * GPIO3's are linked, and so both need to be toggled
+		 * to obtain the required 2us pulse.
+		 */
+		rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+		break;
+	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
+		rc = -EINVAL;
+		break;
+	default:
+		DP(NETIF_MSG_LINK,
+			   "ext_phy 0x%x common init not required\n",
+			   ext_phy_type);
+		break;
+	}
+
+	if (rc != 0)
+		netdev_err(bp->dev, "Warning: PHY was not initialized,"
+				    " Port %d\n", 0);
+	return rc;
+}
+
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+			  u32 shmem2_base_path[], u32 chip_id)
+{
+	int rc = 0;
+	u32 phy_ver, val;
+	u8 phy_index = 0;
+	u32 ext_phy_type, ext_phy_config;
+	bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+	bnx2x_set_mdio_clk(bp, chip_id, PORT_1);
+	DP(NETIF_MSG_LINK, "Begin common phy init\n");
+	if (CHIP_IS_E3(bp)) {
+		/* Enable EPIO */
+		val = REG_RD(bp, MISC_REG_GEN_PURP_HWG);
+		REG_WR(bp, MISC_REG_GEN_PURP_HWG, val | 1);
+	}
+	/* Check if common init was already done */
+	phy_ver = REG_RD(bp, shmem_base_path[0] +
+			 offsetof(struct shmem_region,
+				  port_mb[PORT_0].ext_phy_fw_version));
+	if (phy_ver) {
+		DP(NETIF_MSG_LINK, "Not doing common init; phy ver is 0x%x\n",
+			       phy_ver);
+		return 0;
+	}
+
+	/* Read the ext_phy_type for an arbitrary port (0) */
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		ext_phy_config = bnx2x_get_ext_phy_config(bp,
+							  shmem_base_path[0],
+							  phy_index, 0);
+		ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+		rc |= bnx2x_ext_phy_common_init(bp, shmem_base_path,
+						shmem2_base_path,
+						phy_index, ext_phy_type,
+						chip_id);
+	}
+	return rc;
+}
+
+static void bnx2x_check_over_curr(struct link_params *params,
+				  struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 cfg_pin;
+	u8 port = params->port;
+	u32 pin_val;
+
+	cfg_pin = (REG_RD(bp, params->shmem_base +
+			  offsetof(struct shmem_region,
+			       dev_info.port_hw_config[port].e3_cmn_pin_cfg1)) &
+		   PORT_HW_CFG_E3_OVER_CURRENT_MASK) >>
+		PORT_HW_CFG_E3_OVER_CURRENT_SHIFT;
+
+	/* Ignore check if no external input PIN available */
+	if (bnx2x_get_cfg_pin(bp, cfg_pin, &pin_val) != 0)
+		return;
+
+	if (!pin_val) {
+		if ((vars->phy_flags & PHY_OVER_CURRENT_FLAG) == 0) {
+			netdev_err(bp->dev, "Error:  Power fault on Port %d has"
+					    " been detected and the power to "
+					    "that SFP+ module has been removed"
+					    " to prevent failure of the card."
+					    " Please remove the SFP+ module and"
+					    " restart the system to clear this"
+					    " error.\n",
+			 params->port);
+			vars->phy_flags |= PHY_OVER_CURRENT_FLAG;
+		}
+	} else
+		vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG;
+}
+
+static void bnx2x_analyze_link_error(struct link_params *params,
+				     struct link_vars *vars, u32 lss_status)
+{
+	struct bnx2x *bp = params->bp;
+	/* Compare new value with previous value */
+	u8 led_mode;
+	u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0;
+
+	if ((lss_status ^ half_open_conn) == 0)
+		return;
+
+	/* If values differ */
+	DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up,
+		       half_open_conn, lss_status);
+
+	/*
+	 * a. Update shmem->link_status accordingly
+	 * b. Update link_vars->link_up
+	 */
+	if (lss_status) {
+		DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n");
+		vars->link_status &= ~LINK_STATUS_LINK_UP;
+		vars->link_up = 0;
+		vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
+		/*
+		 * Set LED mode to off since the PHY doesn't know about these
+		 * errors
+		 */
+		led_mode = LED_MODE_OFF;
+	} else {
+		DP(NETIF_MSG_LINK, "Remote Fault cleared\n");
+		vars->link_status |= LINK_STATUS_LINK_UP;
+		vars->link_up = 1;
+		vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG;
+		led_mode = LED_MODE_OPER;
+	}
+	/* Update the LED according to the link state */
+	bnx2x_set_led(params, vars, led_mode, SPEED_10000);
+
+	/* Update link status in the shared memory */
+	bnx2x_update_mng(params, vars->link_status);
+
+	/* c. Trigger General Attention */
+	vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT;
+	bnx2x_notify_link_changed(bp);
+}
+
+/******************************************************************************
+* Description:
+*	This function checks for a half-opened connection change indication.
+*	When such a change occurs, it calls bnx2x_analyze_link_error to
+*	check whether Remote Fault is set or cleared. Reception of a remote
+*	fault status message in the MAC indicates that the peer's MAC has
+*	detected a fault, for example, due to a break in the TX side of
+*	the fiber.
+*
+******************************************************************************/
+static void bnx2x_check_half_open_conn(struct link_params *params,
+				       struct link_vars *vars)
+{
+	struct bnx2x *bp = params->bp;
+	u32 lss_status = 0;
+	u32 mac_base;
+	/* Proceed only when the link is physically up (@ 10G) */
+	if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+		return;
+
+	if (CHIP_IS_E3(bp) &&
+	    (REG_RD(bp, MISC_REG_RESET_REG_2) &
+	      (MISC_REGISTERS_RESET_REG_2_XMAC))) {
+		/* Check E3 XMAC */
+		/*
+		 * Note that link speed cannot be queried here, since it may be
+		 * zero while link is down. In case UMAC is active, LSS will
+		 * simply not be set
+		 */
+		mac_base = (params->port) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
+
+		/* Clear sticky bits (requires a rising edge) */
+		REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS, 0);
+		REG_WR(bp, mac_base + XMAC_REG_CLEAR_RX_LSS_STATUS,
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS |
+		       XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS);
+		if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS))
+			lss_status = 1;
+
+		bnx2x_analyze_link_error(params, vars, lss_status);
+	} else if (REG_RD(bp, MISC_REG_RESET_REG_2) &
+		   (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) {
+		/* Check E1X / E2 BMAC */
+		u32 lss_status_reg;
+		u32 wb_data[2];
+		mac_base = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
+			NIG_REG_INGRESS_BMAC0_MEM;
+		/*  Read BIGMAC_REGISTER_RX_LSS_STATUS */
+		if (CHIP_IS_E2(bp))
+			lss_status_reg = BIGMAC2_REGISTER_RX_LSS_STAT;
+		else
+			lss_status_reg = BIGMAC_REGISTER_RX_LSS_STATUS;
+
+		REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2);
+		lss_status = (wb_data[0] > 0);
+
+		bnx2x_analyze_link_error(params, vars, lss_status);
+	}
+}
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars)
+{
+	struct bnx2x *bp;
+	u16 phy_idx;
+
+	/* Check params before dereferencing it; DP() itself needs bp */
+	if (!params)
+		return;
+	bp = params->bp;
+
+	for (phy_idx = INT_PHY; phy_idx < MAX_PHYS; phy_idx++) {
+		if (params->phy[phy_idx].flags & FLAGS_TX_ERROR_CHECK) {
+			bnx2x_set_aer_mmd(params, &params->phy[phy_idx]);
+			bnx2x_check_half_open_conn(params, vars);
+			break;
+		}
+	}
+
+	if (CHIP_IS_E3(bp))
+		bnx2x_check_over_curr(params, vars);
+}
+
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base, u32 shmem2_base)
+{
+	u8 phy_index;
+	struct bnx2x_phy phy;
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       0, &phy) != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+
+		if (phy.flags & FLAGS_HW_LOCK_REQUIRED)
+			return 1;
+	}
+	return 0;
+}
+
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp,
+			     u32 shmem_base,
+			     u32 shmem2_base,
+			     u8 port)
+{
+	u8 phy_index, fan_failure_det_req = 0;
+	struct bnx2x_phy phy;
+	for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
+				       port, &phy)
+		    != 0) {
+			DP(NETIF_MSG_LINK, "populate phy failed\n");
+			return 0;
+		}
+		fan_failure_det_req |= (phy.flags &
+					FLAGS_FAN_FAILURE_DET_REQ);
+	}
+	return fan_failure_det_req;
+}
+
+void bnx2x_hw_reset_phy(struct link_params *params)
+{
+	u8 phy_index;
+	struct bnx2x *bp = params->bp;
+	bnx2x_update_mng(params, 0);
+	bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4,
+		       (NIG_MASK_XGXS0_LINK_STATUS |
+			NIG_MASK_XGXS0_LINK10G |
+			NIG_MASK_SERDES0_LINK_STATUS |
+			NIG_MASK_MI_INT));
+
+	for (phy_index = INT_PHY; phy_index < MAX_PHYS;
+	      phy_index++) {
+		if (params->phy[phy_index].hw_reset) {
+			params->phy[phy_index].hw_reset(
+				&params->phy[phy_index],
+				params);
+			params->phy[phy_index] = phy_null;
+		}
+	}
+}
+
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port)
+{
+	u8 gpio_num = 0xff, gpio_port = 0xff, phy_index;
+	u32 val;
+	u32 offset, aeu_mask, swap_val, swap_override, sync_offset;
+	if (CHIP_IS_E3(bp)) {
+		if (bnx2x_get_mod_abs_int_cfg(bp, chip_id,
+					      shmem_base,
+					      port,
+					      &gpio_num,
+					      &gpio_port) != 0)
+			return;
+	} else {
+		struct bnx2x_phy phy;
+		for (phy_index = EXT_PHY1; phy_index < MAX_PHYS;
+		      phy_index++) {
+			if (bnx2x_populate_phy(bp, phy_index, shmem_base,
+					       shmem2_base, port, &phy)
+			    != 0) {
+				DP(NETIF_MSG_LINK, "populate phy failed\n");
+				return;
+			}
+			if (phy.type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) {
+				gpio_num = MISC_REGISTERS_GPIO_3;
+				gpio_port = port;
+				break;
+			}
+		}
+	}
+
+	if (gpio_num == 0xff)
+		return;
+
+	/* Set GPIO3 to trigger SFP+ module insertion/removal */
+	bnx2x_set_gpio(bp, gpio_num, MISC_REGISTERS_GPIO_INPUT_HI_Z, gpio_port);
+
+	swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+	swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+	gpio_port ^= (swap_val && swap_override);
+
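+	/*
+	 * Each port owns four consecutive GPIO AEU bits, hence the
+	 * (gpio_port << 2) offset on top of the gpio number
+	 */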
+	vars->aeu_int_mask = AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0 <<
+		(gpio_num + (gpio_port << 2));
+
+	sync_offset = shmem_base +
+		offsetof(struct shmem_region,
+			 dev_info.port_hw_config[port].aeu_int_mask);
+	REG_WR(bp, sync_offset, vars->aeu_int_mask);
+
+	DP(NETIF_MSG_LINK, "Setting MOD_ABS (GPIO%d_P%d) AEU to 0x%x\n",
+		       gpio_num, gpio_port, vars->aeu_int_mask);
+
+	if (port == 0)
+		offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
+	else
+		offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
+
+	/* Open appropriate AEU for interrupts */
+	aeu_mask = REG_RD(bp, offset);
+	aeu_mask |= vars->aeu_int_mask;
+	REG_WR(bp, offset, aeu_mask);
+
+	/* Enable the GPIO to trigger interrupt */
+	val = REG_RD(bp, MISC_REG_GPIO_EVENT_EN);
+	val |= 1 << (gpio_num + (gpio_port << 2));
+	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
new file mode 100644
index 0000000..c12db6d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -0,0 +1,493 @@
+/* Copyright 2008-2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Written by Yaniv Rosner
+ *
+ */
+
+#ifndef BNX2X_LINK_H
+#define BNX2X_LINK_H
+
+/***********************************************************/
+/*                         Defines                         */
+/***********************************************************/
+#define DEFAULT_PHY_DEV_ADDR	3
+#define E2_DEFAULT_PHY_DEV_ADDR	5
+
+#define BNX2X_FLOW_CTRL_AUTO		PORT_FEATURE_FLOW_CONTROL_AUTO
+#define BNX2X_FLOW_CTRL_TX		PORT_FEATURE_FLOW_CONTROL_TX
+#define BNX2X_FLOW_CTRL_RX		PORT_FEATURE_FLOW_CONTROL_RX
+#define BNX2X_FLOW_CTRL_BOTH		PORT_FEATURE_FLOW_CONTROL_BOTH
+#define BNX2X_FLOW_CTRL_NONE		PORT_FEATURE_FLOW_CONTROL_NONE
+
+#define NET_SERDES_IF_XFI		1
+#define NET_SERDES_IF_SFI		2
+#define NET_SERDES_IF_KR		3
+#define NET_SERDES_IF_DXGXS	4
+
+#define SPEED_AUTO_NEG		0
+#define SPEED_20000		20000
+
+#define SFP_EEPROM_VENDOR_NAME_ADDR		0x14
+#define SFP_EEPROM_VENDOR_NAME_SIZE		16
+#define SFP_EEPROM_VENDOR_OUI_ADDR		0x25
+#define SFP_EEPROM_VENDOR_OUI_SIZE		3
+#define SFP_EEPROM_PART_NO_ADDR			0x28
+#define SFP_EEPROM_PART_NO_SIZE			16
+#define SFP_EEPROM_REVISION_ADDR		0x38
+#define SFP_EEPROM_REVISION_SIZE		4
+#define SFP_EEPROM_SERIAL_ADDR			0x44
+#define SFP_EEPROM_SERIAL_SIZE			16
+#define SFP_EEPROM_DATE_ADDR			0x54 /* ASCII YYMMDD */
+#define SFP_EEPROM_DATE_SIZE			6
+#define PWR_FLT_ERR_MSG_LEN			250
+
+#define XGXS_EXT_PHY_TYPE(ext_phy_config) \
+		((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK)
+#define XGXS_EXT_PHY_ADDR(ext_phy_config) \
+		(((ext_phy_config) & PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >> \
+		 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT)
+#define SERDES_EXT_PHY_TYPE(ext_phy_config) \
+		((ext_phy_config) & PORT_HW_CFG_SERDES_EXT_PHY_TYPE_MASK)
+
+/* Single Media Direct board is the plain 577xx board with CX4/RJ45 jacks */
+#define SINGLE_MEDIA_DIRECT(params)	(params->num_phys == 1)
+/* Single Media board contains single external phy */
+#define SINGLE_MEDIA(params)		(params->num_phys == 2)
+/* Dual Media board contains two external phy with different media */
+#define DUAL_MEDIA(params)		(params->num_phys == 3)
+
+#define FW_PARAM_PHY_ADDR_MASK		0x000000FF
+#define FW_PARAM_PHY_TYPE_MASK		0x0000FF00
+#define FW_PARAM_MDIO_CTRL_MASK		0xFFFF0000
+#define FW_PARAM_MDIO_CTRL_OFFSET		16
+#define FW_PARAM_PHY_ADDR(fw_param) (fw_param & \
+					   FW_PARAM_PHY_ADDR_MASK)
+#define FW_PARAM_PHY_TYPE(fw_param) (fw_param & \
+					   FW_PARAM_PHY_TYPE_MASK)
+#define FW_PARAM_MDIO_CTRL(fw_param) ((fw_param & \
+					    FW_PARAM_MDIO_CTRL_MASK) >> \
+					    FW_PARAM_MDIO_CTRL_OFFSET)
+#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
+	(phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
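+/*
+ * fw_param layout: bits 0-7 hold the PHY address, bits 8-15 the PHY type
+ * and bits 16-31 the MDIO control value (see the masks above).
+ */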
+
+
+#define PFC_BRB_FULL_LB_XOFF_THRESHOLD				170
+#define PFC_BRB_FULL_LB_XON_THRESHOLD				250
+
+#define MAXVAL(a, b) (((a) > (b)) ? (a) : (b))
+/***********************************************************/
+/*                         Structs                         */
+/***********************************************************/
+#define INT_PHY		0
+#define EXT_PHY1	1
+#define EXT_PHY2	2
+#define MAX_PHYS	3
+
+/* Same configuration is shared between the XGXS and the first external phy */
+#define LINK_CONFIG_SIZE (MAX_PHYS - 1)
+#define LINK_CONFIG_IDX(_phy_idx) ((_phy_idx == INT_PHY) ? \
+					 0 : (_phy_idx - 1))
+/***********************************************************/
+/*                      bnx2x_phy struct                     */
+/*  Defines the required arguments and function per phy    */
+/***********************************************************/
+struct link_vars;
+struct link_params;
+struct bnx2x_phy;
+
+typedef u8 (*config_init_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef u8 (*read_status_t)(struct bnx2x_phy *phy, struct link_params *params,
+			    struct link_vars *vars);
+typedef void (*link_reset_t)(struct bnx2x_phy *phy,
+			     struct link_params *params);
+typedef void (*config_loopback_t)(struct bnx2x_phy *phy,
+				  struct link_params *params);
+typedef u8 (*format_fw_ver_t)(u32 raw, u8 *str, u16 *len);
+typedef void (*hw_reset_t)(struct bnx2x_phy *phy, struct link_params *params);
+typedef void (*set_link_led_t)(struct bnx2x_phy *phy,
+			       struct link_params *params, u8 mode);
+typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy,
+				    struct link_params *params, u32 action);
+
+struct bnx2x_phy {
+	u32 type;
+
+	/* Loaded during init */
+	u8 addr;
+	u8 def_md_devad;
+	u16 flags;
+	/* Require HW lock */
+#define FLAGS_HW_LOCK_REQUIRED		(1<<0)
+	/* No Over-Current detection */
+#define FLAGS_NOC			(1<<1)
+	/* Fan failure detection required */
+#define FLAGS_FAN_FAILURE_DET_REQ	(1<<2)
+	/* Initialize first the XGXS and only then the phy itself */
+#define FLAGS_INIT_XGXS_FIRST		(1<<3)
+#define FLAGS_WC_DUAL_MODE		(1<<4)
+#define FLAGS_4_PORT_MODE		(1<<5)
+#define FLAGS_REARM_LATCH_SIGNAL	(1<<6)
+#define FLAGS_SFP_NOT_APPROVED		(1<<7)
+#define FLAGS_MDC_MDIO_WA		(1<<8)
+#define FLAGS_DUMMY_READ		(1<<9)
+#define FLAGS_MDC_MDIO_WA_B0		(1<<10)
+#define FLAGS_TX_ERROR_CHECK		(1<<12)
+
+	/* preemphasis values for the rx side */
+	u16 rx_preemphasis[4];
+
+	/* preemphasis values for the tx side */
+	u16 tx_preemphasis[4];
+
+	/* EMAC address used for MDIO access */
+	u32 mdio_ctrl;
+
+	u32 supported;
+
+	u32 media_type;
+#define	ETH_PHY_UNSPECIFIED 0x0
+#define	ETH_PHY_SFP_FIBER   0x1
+#define	ETH_PHY_XFP_FIBER   0x2
+#define	ETH_PHY_DA_TWINAX   0x3
+#define	ETH_PHY_BASE_T      0x4
+#define	ETH_PHY_KR          0xf0
+#define	ETH_PHY_CX4         0xf1
+#define	ETH_PHY_NOT_PRESENT 0xff
+
+	/* The address in which the version is located */
+	u32 ver_addr;
+
+	u16 req_flow_ctrl;
+
+	u16 req_line_speed;
+
+	u32 speed_cap_mask;
+
+	u16 req_duplex;
+	u16 rsrv;
+	/* Called per phy/port init; configures LASI, speed, autoneg,
+	   duplex, flow-control negotiation, etc. */
+	config_init_t config_init;
+
+	/* Called on link interrupt; determines the link state and speed */
+	read_status_t read_status;
+
+	/* Called when driver is unloading. Should reset the phy */
+	link_reset_t link_reset;
+
+	/* Set the loopback configuration for the phy */
+	config_loopback_t config_loopback;
+
+	/* Format the given raw number into str up to len */
+	format_fw_ver_t format_fw_ver;
+
+	/* Reset the phy (both ports) */
+	hw_reset_t hw_reset;
+
+	/* Set link led mode (on/off/oper) */
+	set_link_led_t set_link_led;
+
+	/* PHY Specific tasks */
+	phy_specific_func_t phy_specific_func;
+#define DISABLE_TX	1
+#define ENABLE_TX	2
+};
+
+/* Inputs parameters to the CLC */
+struct link_params {
+
+	u8 port;
+
+	/* Default / User Configuration */
+	u8 loopback_mode;
+#define LOOPBACK_NONE		0
+#define LOOPBACK_EMAC		1
+#define LOOPBACK_BMAC		2
+#define LOOPBACK_XGXS		3
+#define LOOPBACK_EXT_PHY	4
+#define LOOPBACK_EXT		5
+#define LOOPBACK_UMAC		6
+#define LOOPBACK_XMAC		7
+
+	/* Device parameters */
+	u8 mac_addr[6];
+
+	u16 req_duplex[LINK_CONFIG_SIZE];
+	u16 req_flow_ctrl[LINK_CONFIG_SIZE];
+
+	u16 req_line_speed[LINK_CONFIG_SIZE]; /* Also determine AutoNeg */
+
+	/* shmem parameters */
+	u32 shmem_base;
+	u32 shmem2_base;
+	u32 speed_cap_mask[LINK_CONFIG_SIZE];
+	u32 switch_cfg;
+#define SWITCH_CFG_1G		PORT_FEATURE_CON_SWITCH_1G_SWITCH
+#define SWITCH_CFG_10G		PORT_FEATURE_CON_SWITCH_10G_SWITCH
+#define SWITCH_CFG_AUTO_DETECT	PORT_FEATURE_CON_SWITCH_AUTO_DETECT
+
+	u32 lane_config;
+
+	/* Phy register parameter */
+	u32 chip_id;
+
+	/* features */
+	u32 feature_config_flags;
+#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED	(1<<0)
+#define FEATURE_CONFIG_PFC_ENABLED			(1<<1)
+#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY		(1<<2)
+#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY	(1<<3)
+#define FEATURE_CONFIG_AUTOGREEEN_ENABLED			(1<<9)
+#define FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED		(1<<10)
+	/* Will be populated during common init */
+	struct bnx2x_phy phy[MAX_PHYS];
+
+	/* Will be populated during common init */
+	u8 num_phys;
+
+	u8 rsrv;
+	u16 hw_led_mode; /* part of the hw_config read from the shmem */
+	u32 multi_phy_config;
+
+	/* Device pointer passed to all callback functions */
+	struct bnx2x *bp;
+	u16 req_fc_auto_adv; /* Should be set to TX / BOTH when
+				req_flow_ctrl is set to AUTO */
+};
+
+/* Output parameters */
+struct link_vars {
+	u8 phy_flags;
+#define PHY_XGXS_FLAG			(1<<0)
+#define PHY_SGMII_FLAG			(1<<1)
+#define PHY_PHYSICAL_LINK_FLAG		(1<<2)
+#define PHY_HALF_OPEN_CONN_FLAG		(1<<3)
+#define PHY_OVER_CURRENT_FLAG		(1<<4)
+
+	u8 mac_type;
+#define MAC_TYPE_NONE		0
+#define MAC_TYPE_EMAC		1
+#define MAC_TYPE_BMAC		2
+#define MAC_TYPE_UMAC		3
+#define MAC_TYPE_XMAC		4
+
+	u8 phy_link_up; /* internal phy link indication */
+	u8 link_up;
+
+	u16 line_speed;
+	u16 duplex;
+
+	u16 flow_ctrl;
+	u16 ieee_fc;
+
+	/* The same definitions as the shmem parameter */
+	u32 link_status;
+	u8 fault_detected;
+	u8 rsrv1;
+	u16 periodic_flags;
+#define PERIODIC_FLAGS_LINK_EVENT	0x0001
+
+	u32 aeu_int_mask;
+};
+
+/***********************************************************/
+/*                         Functions                       */
+/***********************************************************/
+int bnx2x_phy_init(struct link_params *params, struct link_vars *vars);
+
+/* Reset the link. Should be called when the driver or interface goes down.
+   Before a phy firmware upgrade, reset_ext_phy should be set to 0 */
+int bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
+		     u8 reset_ext_phy);
+
+/* bnx2x_link_update should be called upon link interrupt */
+int bnx2x_link_update(struct link_params *params, struct link_vars *vars);
+
+/* Use the following phy functions to read/write from the external phy.
+   To read/write internal phy registers, use DEFAULT_PHY_DEV_ADDR as
+   devad, and (_bank + (_addr & 0xf)) as the register */
+int bnx2x_phy_read(struct link_params *params, u8 phy_addr,
+		   u8 devad, u16 reg, u16 *ret_val);
+
+int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
+		    u8 devad, u16 reg, u16 val);
+
+/* Reads the link_status from the shmem
+   and updates the link vars accordingly */
+void bnx2x_link_status_update(struct link_params *input,
+			    struct link_vars *output);
+/* returns string representing the fw_version of the external phy */
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
+				 u8 *version, u16 len);
+
+/* Set/unset the led.
+   Normally the CLC takes care of the link led, but when the led must be
+   driven manually, set "mode" to LED_MODE_OPER to blink the led and
+   LED_MODE_OFF to turn it off */
+int bnx2x_set_led(struct link_params *params,
+		  struct link_vars *vars, u8 mode, u32 speed);
+#define LED_MODE_OFF			0
+#define LED_MODE_ON			1
+#define LED_MODE_OPER			2
+#define LED_MODE_FRONT_PANEL_OFF	3
+
+/* bnx2x_handle_module_detect_int should be called upon module detection
+   interrupt */
+void bnx2x_handle_module_detect_int(struct link_params *params);
+
+/* Get the actual link status. A return value of 0 means the link is up,
+   otherwise the link is down */
+int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
+		    u8 is_serdes);
+
+/* One-time initialization for external phy after power up */
+int bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base_path[],
+			  u32 shmem2_base_path[], u32 chip_id);
+
+/* Reset the external PHY using GPIO */
+void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port);
+
+/* Reset the external SFX7101 phy */
+void bnx2x_sfx7101_sp_sw_reset(struct bnx2x *bp, struct bnx2x_phy *phy);
+
+/* Read "byte_cnt" bytes from address "addr" from the SFP+ EEPROM */
+int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy,
+				 struct link_params *params, u16 addr,
+				 u8 byte_cnt, u8 *o_buf);
+
+void bnx2x_hw_reset_phy(struct link_params *params);
+
+/* Checks if HW lock is required for this phy/board type */
+u8 bnx2x_hw_lock_required(struct bnx2x *bp, u32 shmem_base,
+			  u32 shmem2_base);
+
+/* Check swap bit and adjust PHY order */
+u32 bnx2x_phy_selection(struct link_params *params);
+
+/* Probe the phys on board, and populate them in "params" */
+int bnx2x_phy_probe(struct link_params *params);
+
+/* Checks if fan failure detection is required on one of the phys on board */
+u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
+			     u32 shmem2_base, u8 port);
+
+/* DCBX structs */
+
+/* Number of maximum COS per chip */
+#define DCBX_E2E3_MAX_NUM_COS		(2)
+#define DCBX_E3B0_MAX_NUM_COS_PORT0	(6)
+#define DCBX_E3B0_MAX_NUM_COS_PORT1	(3)
+#define DCBX_E3B0_MAX_NUM_COS		( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS_PORT0, \
+			    DCBX_E3B0_MAX_NUM_COS_PORT1))
+
+#define DCBX_MAX_NUM_COS			( \
+			MAXVAL(DCBX_E3B0_MAX_NUM_COS, \
+			    DCBX_E2E3_MAX_NUM_COS))
+
+/* PFC port configuration params */
+struct bnx2x_nig_brb_pfc_port_params {
+	/* NIG */
+	u32 pause_enable;
+	u32 llfc_out_en;
+	u32 llfc_enable;
+	u32 pkt_priority_to_cos;
+	u8 num_of_rx_cos_priority_mask;
+	u32 rx_cos_priority_mask[DCBX_MAX_NUM_COS];
+	u32 llfc_high_priority_classes;
+	u32 llfc_low_priority_classes;
+	/* BRB */
+	u32 cos0_pauseable;
+	u32 cos1_pauseable;
+};
+
+
+/* ETS port configuration params */
+struct bnx2x_ets_bw_params {
+	u8 bw;
+};
+
+struct bnx2x_ets_sp_params {
+	/**
+	 * valid values are 0 - 5. 0 is highest strict priority.
+	 * There can't be two COS's with the same pri.
+	 */
+	u8 pri;
+};
+
+enum bnx2x_cos_state {
+	bnx2x_cos_state_strict = 0,
+	bnx2x_cos_state_bw = 1,
+};
+
+struct bnx2x_ets_cos_params {
+	enum bnx2x_cos_state state;
+	union {
+		struct bnx2x_ets_bw_params bw_params;
+		struct bnx2x_ets_sp_params sp_params;
+	} params;
+};
+
+struct bnx2x_ets_params {
+	u8 num_of_cos; /* Number of valid COS entries*/
+	struct bnx2x_ets_cos_params cos[DCBX_MAX_NUM_COS];
+};
+
+/**
+ * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
+ * when link is already up
+ */
+int bnx2x_update_pfc(struct link_params *params,
+		      struct link_vars *vars,
+		      struct bnx2x_nig_brb_pfc_port_params *pfc_params);
+
+
+/* Used to configure the ETS to disable */
+int bnx2x_ets_disabled(struct link_params *params,
+		       struct link_vars *vars);
+
+/* Used to configure the ETS to BW limited */
+void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
+			const u32 cos1_bw);
+
+/* Used to configure the ETS to strict */
+int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
+
+
+/* Configure the COS to ETS according to the BW and SP settings */
+int bnx2x_ets_e3b0_config(const struct link_params *params,
+			 const struct link_vars *vars,
+			 const struct bnx2x_ets_params *ets_params);
+/* Read PFC statistics */
+void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
+						 u32 pfc_frames_sent[2],
+						 u32 pfc_frames_received[2]);
+void bnx2x_init_mod_abs_int(struct bnx2x *bp, struct link_vars *vars,
+			    u32 chip_id, u32 shmem_base, u32 shmem2_base,
+			    u8 port);
+
+int bnx2x_sfp_module_detection(struct bnx2x_phy *phy,
+			       struct link_params *params);
+
+void bnx2x_period_func(struct link_params *params, struct link_vars *vars);
+
+#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
new file mode 100644
index 0000000..1507091
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -0,0 +1,11531 @@
+/* bnx2x_main.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/device.h>  /* for dev_info() */
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <asm/byteorder.h>
+#include <linux/time.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/workqueue.h>
+#include <linux/crc32.h>
+#include <linux/crc32c.h>
+#include <linux/prefetch.h>
+#include <linux/zlib.h>
+#include <linux/io.h>
+#include <linux/stringify.h>
+#include <linux/vmalloc.h>
+
+#include "bnx2x.h"
+#include "bnx2x_init.h"
+#include "bnx2x_init_ops.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_dcb.h"
+#include "bnx2x_sp.h"
+
+#include <linux/firmware.h>
+#include "bnx2x_fw_file_hdr.h"
+/* FW files */
+#define FW_FILE_VERSION					\
+	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
+	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
+	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
+#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
+
+/* Time in jiffies before concluding the transmitter is hung */
+#define TX_TIMEOUT		(5*HZ)
+
+static char version[] __devinitdata =
+	"Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
+	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Eliezer Tamir");
+MODULE_DESCRIPTION("Broadcom NetXtreme II "
+		   "BCM57710/57711/57711E/"
+		   "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
+		   "57840/57840_MF Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FW_FILE_NAME_E1);
+MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);
+
+static int multi_mode = 1;
+module_param(multi_mode, int, 0);
+MODULE_PARM_DESC(multi_mode, " Multi queue mode "
+			     "(0 Disable; 1 Enable (default))");
+
+int num_queues;
+module_param(num_queues, int, 0);
+MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
+				" (default is as a number of CPUs)");
+
+static int disable_tpa;
+module_param(disable_tpa, int, 0);
+MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
+
+#define INT_MODE_INTx			1
+#define INT_MODE_MSI			2
+static int int_mode;
+module_param(int_mode, int, 0);
+MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
+				"(1 INT#x; 2 MSI)");
+
+static int dropless_fc;
+module_param(dropless_fc, int, 0);
+MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
+
+static int poll;
+module_param(poll, int, 0);
+MODULE_PARM_DESC(poll, " Use polling (for debug)");
+
+static int mrrs = -1;
+module_param(mrrs, int, 0);
+MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
+
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Default debug msglevel");
+
+
+
+struct workqueue_struct *bnx2x_wq;
+
+enum bnx2x_board_type {
+	BCM57710 = 0,
+	BCM57711,
+	BCM57711E,
+	BCM57712,
+	BCM57712_MF,
+	BCM57800,
+	BCM57800_MF,
+	BCM57810,
+	BCM57810_MF,
+	BCM57840,
+	BCM57840_MF
+};
+
+/* indexed by board_type, above */
+static struct {
+	char *name;
+} board_info[] __devinitdata = {
+	{ "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
+	{ "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
+	{ "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
+	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
+	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
+	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
+	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
+	{ "Broadcom NetXtreme II BCM57840 10/20 Gigabit "
+						"Ethernet Multi Function"}
+};
+
+#ifndef PCI_DEVICE_ID_NX2_57710
+#define PCI_DEVICE_ID_NX2_57710		CHIP_NUM_57710
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711
+#define PCI_DEVICE_ID_NX2_57711		CHIP_NUM_57711
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57711E
+#define PCI_DEVICE_ID_NX2_57711E	CHIP_NUM_57711E
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712		CHIP_NUM_57712
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712_MF
+#define PCI_DEVICE_ID_NX2_57712_MF	CHIP_NUM_57712_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800
+#define PCI_DEVICE_ID_NX2_57800		CHIP_NUM_57800
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57800_MF
+#define PCI_DEVICE_ID_NX2_57800_MF	CHIP_NUM_57800_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810
+#define PCI_DEVICE_ID_NX2_57810		CHIP_NUM_57810
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57810_MF
+#define PCI_DEVICE_ID_NX2_57810_MF	CHIP_NUM_57810_MF
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840
+#define PCI_DEVICE_ID_NX2_57840		CHIP_NUM_57840
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57840_MF
+#define PCI_DEVICE_ID_NX2_57840_MF	CHIP_NUM_57840_MF
+#endif
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840), BCM57840 },
+	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
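+/* Write a 64-bit DMA address as two 32-bit halves, low dword first */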
+static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
+				       u32 addr, dma_addr_t mapping)
+{
+	REG_WR(bp,  addr, U64_LO(mapping));
+	REG_WR(bp,  addr + 4, U64_HI(mapping));
+}
+
+static inline void storm_memset_spq_addr(struct bnx2x *bp,
+					 dma_addr_t mapping, u16 abs_fid)
+{
+	u32 addr = XSEM_REG_FAST_MEMORY +
+			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
+
+	__storm_memset_dma_mapping(bp, addr, mapping);
+}
+
+static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
+					 u16 pf_id)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
+		pf_id);
+}
+
+static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
+					u8 enable)
+{
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
+		enable);
+}
+
+static inline void storm_memset_eq_data(struct bnx2x *bp,
+				struct event_ring_data *eq_data,
+				u16 pfid)
+{
+	size_t size = sizeof(struct event_ring_data);
+
+	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
+}
+
+static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
+					u16 pfid)
+{
+	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
+	REG_WR16(bp, addr, eq_prod);
+}
+
+/* used only at init
+ * locking is done by mcp
+ */
+static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
+{
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+}
+
+static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
+{
+	u32 val;
+
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
+	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+
+	return val;
+}
+
+#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE	"dst_addr [none]"
+
+static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
+			  int msglvl)
+{
+	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+	switch (dmae->opcode & DMAE_COMMAND_DST) {
+	case DMAE_CMD_DST_PCI:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	case DMAE_CMD_DST_GRC:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->dst_addr_lo >> 2,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   "src [%08x], len [%d*4], dst [%08x]\n"
+			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->dst_addr_lo >> 2,
+			   dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	default:
+		if (src_type == DMAE_CMD_SRC_PCI)
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
+				    "dst_addr [none]\n"
+			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		else
+			DP(msglvl, "DMAE: opcode 0x%08x\n"
+			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
+				    "dst_addr [none]\n"
+			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
+			   dmae->opcode, dmae->src_addr_lo >> 2,
+			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+			   dmae->comp_val);
+		break;
+	}
+}
+
+/* copy command into DMAE command memory and set DMAE command go */
+void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
+{
+	u32 cmd_offset;
+	int i;
+
+	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
+	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
+		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
+
+		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
+		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
+	}
+	REG_WR(bp, dmae_reg_go_c[idx], 1);
+}
+
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
+{
+	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
+			   DMAE_CMD_C_ENABLE);
+}
+
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
+{
+	return opcode & ~DMAE_CMD_SRC_RESET;
+}
+
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
+			     bool with_comp, u8 comp_type)
+{
+	u32 opcode = 0;
+
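+	/* Compose the opcode: source/destination types, address-reset
+	 * policy, port, VN, error policy, endianness swap and (optionally)
+	 * the completion destination.
+	 */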
+	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
+		   (dst_type << DMAE_COMMAND_DST_SHIFT));
+
+	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
+
+	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
+	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
+		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
+	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
+
+#ifdef __BIG_ENDIAN
+	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
+#else
+	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
+#endif
+	if (with_comp)
+		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
+	return opcode;
+}
+
+static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
+				      struct dmae_command *dmae,
+				      u8 src_type, u8 dst_type)
+{
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	/* set the opcode */
+	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
+					 true, DMAE_COMP_PCI);
+
+	/* fill in the completion parameters */
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+}
+
+/* issue a DMAE command over the init channel and wait for completion */
+static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
+				      struct dmae_command *dmae)
+{
+	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
+	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
+	int rc = 0;
+
+	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+	/*
+	 * Lock the DMAE channel. Disable BHs to prevent a deadlock,
+	 * since this code is called both from syscall context and
+	 * from the ndo_set_rx_mode() flow, which may run in BH context.
+	 */
+	spin_lock_bh(&bp->dmae_lock);
+
+	/* reset completion */
+	*wb_comp = 0;
+
+	/* post the command on the channel used for initializations */
+	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+
+	/* wait for completion */
+	udelay(5);
+	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
+		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
+
+		if (!cnt) {
+			BNX2X_ERR("DMAE timeout!\n");
+			rc = DMAE_TIMEOUT;
+			goto unlock;
+		}
+		cnt--;
+		udelay(50);
+	}
+	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
+		BNX2X_ERR("DMAE PCI error!\n");
+		rc = DMAE_PCI_ERROR;
+	}
+
+	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
+	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
+	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
+
+unlock:
+	spin_unlock_bh(&bp->dmae_lock);
+	return rc;
+}
+
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
+		      u32 len32)
+{
+	struct dmae_command dmae;
+
+	if (!bp->dmae_ready) {
+		u32 *data = bnx2x_sp(bp, wb_data[0]);
+
+		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
+		   "  using indirect\n", dst_addr, len32);
+		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+		return;
+	}
+
+	/* set opcode and fixed command fields */
+	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
+
+	/* fill in addresses and len */
+	dmae.src_addr_lo = U64_LO(dma_addr);
+	dmae.src_addr_hi = U64_HI(dma_addr);
+	dmae.dst_addr_lo = dst_addr >> 2;
+	dmae.dst_addr_hi = 0;
+	dmae.len = len32;
+
+	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+	/* issue the command and wait for completion */
+	bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
+{
+	struct dmae_command dmae;
+
+	if (!bp->dmae_ready) {
+		u32 *data = bnx2x_sp(bp, wb_data[0]);
+		int i;
+
+		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
+		   "  using indirect\n", src_addr, len32);
+		for (i = 0; i < len32; i++)
+			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+		return;
+	}
+
+	/* set opcode and fixed command fields */
+	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
+
+	/* fill in addresses and len */
+	dmae.src_addr_lo = src_addr >> 2;
+	dmae.src_addr_hi = 0;
+	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
+	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
+	dmae.len = len32;
+
+	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
+
+	/* issue the command and wait for completion */
+	bnx2x_issue_dmae_with_comp(bp, &dmae);
+}
+
+static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
+				      u32 addr, u32 len)
+{
+	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
+	int offset = 0;
+
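+	/* len counts 32-bit words while the addresses are in bytes, so the
+	 * byte offset advances by dmae_wr_max * 4 per chunk.
+	 */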
+	while (len > dmae_wr_max) {
+		bnx2x_write_dmae(bp, phys_addr + offset,
+				 addr + offset, dmae_wr_max);
+		offset += dmae_wr_max * 4;
+		len -= dmae_wr_max;
+	}
+
+	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
+}
+
+/* used only for slowpath so not inlined */
+static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
+{
+	u32 wb_write[2];
+
+	wb_write[0] = val_hi;
+	wb_write[1] = val_lo;
+	REG_WR_DMAE(bp, reg, wb_write, 2);
+}
+
+#ifdef USE_WB_RD
+static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
+{
+	u32 wb_data[2];
+
+	REG_RD_DMAE(bp, reg, wb_data, 2);
+
+	return HILO_U64(wb_data[0], wb_data[1]);
+}
+#endif
+
+static int bnx2x_mc_assert(struct bnx2x *bp)
+{
+	char last_idx;
+	int i, rc = 0;
+	u32 row0, row1, row2, row3;
+
+	/* XSTORM */
+	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
+			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
+			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	/* TSTORM */
+	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
+			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
+			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	/* CSTORM */
+	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
+			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	/* USTORM */
+	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
+			   USTORM_ASSERT_LIST_INDEX_OFFSET);
+	if (last_idx)
+		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
+
+	/* print the asserts */
+	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
+
+		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i));
+		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
+		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
+		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
+			      USTORM_ASSERT_LIST_OFFSET(i) + 12);
+
+		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
+			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
+				  " 0x%08x 0x%08x 0x%08x\n",
+				  i, row3, row2, row1, row0);
+			rc++;
+		} else {
+			break;
+		}
+	}
+
+	return rc;
+}
+
+void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
+{
+	u32 addr, val;
+	u32 mark, offset;
+	__be32 data[9];
+	int word;
+	u32 trace_shmem_base;
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERR("NO MCP - can not dump\n");
+		return;
+	}
+	netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
+		(bp->common.bc_ver & 0xff0000) >> 16,
+		(bp->common.bc_ver & 0xff00) >> 8,
+		(bp->common.bc_ver & 0xff));
+
+	val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
+	if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
+		printk("%s" "MCP PC at 0x%x\n", lvl, val);
+
+	if (BP_PATH(bp) == 0)
+		trace_shmem_base = bp->common.shmem_base;
+	else
+		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
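+	/* The MCP trace buffer sits just below the shmem; read the mark of
+	 * the last valid entry and translate it into a scratchpad address.
+	 */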
+	addr = trace_shmem_base - 0x0800 + 4;
+	mark = REG_RD(bp, addr);
+	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+			+ ((mark + 0x3) & ~0x3) - 0x08000000;
+	printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
+
+	printk("%s", lvl);
+	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
+		for (word = 0; word < 8; word++)
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
+		data[8] = 0x0;
+		pr_cont("%s", (char *)data);
+	}
+	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
+		for (word = 0; word < 8; word++)
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
+		data[8] = 0x0;
+		pr_cont("%s", (char *)data);
+	}
+	printk("%s" "end of fw dump\n", lvl);
+}
+
+static inline void bnx2x_fw_dump(struct bnx2x *bp)
+{
+	bnx2x_fw_dump_lvl(bp, KERN_ERR);
+}
+
+void bnx2x_panic_dump(struct bnx2x *bp)
+{
+	int i;
+	u16 j;
+	struct hc_sp_status_block_data sp_sb_data;
+	int func = BP_FUNC(bp);
+#ifdef BNX2X_STOP_ON_ERROR
+	u16 start = 0, end = 0;
+	u8 cos;
+#endif
+
+	bp->stats_state = STATS_STATE_DISABLED;
+	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
+
+	BNX2X_ERR("begin crash dump -----------------\n");
+
+	/* Indices */
+	/* Common */
+	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
+		  "  spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+		  bp->def_idx, bp->def_att_idx, bp->attn_state,
+		  bp->spq_prod_idx, bp->stats_counter);
+	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
+		  bp->def_status_blk->atten_status_block.attn_bits,
+		  bp->def_status_blk->atten_status_block.attn_bits_ack,
+		  bp->def_status_blk->atten_status_block.status_block_id,
+		  bp->def_status_blk->atten_status_block.attn_bits_index);
+	BNX2X_ERR("     def (");
+	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+		pr_cont("0x%x%s",
+		       bp->def_status_blk->sp_sb.index_values[i],
+		       (i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");
+
+	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+			i*sizeof(u32));
+
+	pr_cont("igu_sb_id(0x%x)  igu_seg_id(0x%x) "
+			 "pf_id(0x%x)  vnic_id(0x%x)  "
+			 "vf_id(0x%x)  vf_valid (0x%x) "
+			 "state(0x%x)\n",
+	       sp_sb_data.igu_sb_id,
+	       sp_sb_data.igu_seg_id,
+	       sp_sb_data.p_func.pf_id,
+	       sp_sb_data.p_func.vnic_id,
+	       sp_sb_data.p_func.vf_id,
+	       sp_sb_data.p_func.vf_valid,
+	       sp_sb_data.state);
+
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		int loop;
+		struct hc_status_block_data_e2 sb_data_e2;
+		struct hc_status_block_data_e1x sb_data_e1x;
+		struct hc_status_block_sm  *hc_sm_p =
+			CHIP_IS_E1x(bp) ?
+			sb_data_e1x.common.state_machine :
+			sb_data_e2.common.state_machine;
+		struct hc_index_data *hc_index_p =
+			CHIP_IS_E1x(bp) ?
+			sb_data_e1x.index_data :
+			sb_data_e2.index_data;
+		u8 data_size, cos;
+		u32 *sb_data_p;
+		struct bnx2x_fp_txdata txdata;
+
+		/* Rx */
+		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
+			  "  rx_comp_prod(0x%x)"
+			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
+			  i, fp->rx_bd_prod, fp->rx_bd_cons,
+			  fp->rx_comp_prod,
+			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
+		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
+			  "  fp_hc_idx(0x%x)\n",
+			  fp->rx_sge_prod, fp->last_max_sge,
+			  le16_to_cpu(fp->fp_hc_idx));
+
+		/* Tx */
+		for_each_cos_in_tx_queue(fp, cos) {
+			txdata = fp->txdata[cos];
+			BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
+				  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
+				  "  *tx_cons_sb(0x%x)\n",
+				  i, txdata.tx_pkt_prod,
+				  txdata.tx_pkt_cons, txdata.tx_bd_prod,
+				  txdata.tx_bd_cons,
+				  le16_to_cpu(*txdata.tx_cons_sb));
+		}
+
+		loop = CHIP_IS_E1x(bp) ?
+			HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
+
+		/* host sb data */
+
+#ifdef BCM_CNIC
+		if (IS_FCOE_FP(fp))
+			continue;
+#endif
+		BNX2X_ERR("     run indexes (");
+		for (j = 0; j < HC_SB_MAX_SM; j++)
+			pr_cont("0x%x%s",
+			       fp->sb_running_index[j],
+			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");
+
+		BNX2X_ERR("     indexes (");
+		for (j = 0; j < loop; j++)
+			pr_cont("0x%x%s",
+			       fp->sb_index_values[j],
+			       (j == loop - 1) ? ")" : " ");
+		/* fw sb data */
+		data_size = CHIP_IS_E1x(bp) ?
+			sizeof(struct hc_status_block_data_e1x) :
+			sizeof(struct hc_status_block_data_e2);
+		data_size /= sizeof(u32);
+		sb_data_p = CHIP_IS_E1x(bp) ?
+			(u32 *)&sb_data_e1x :
+			(u32 *)&sb_data_e2;
+		/* copy sb data in here */
+		for (j = 0; j < data_size; j++)
+			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
+				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
+				j * sizeof(u32));
+
+		if (!CHIP_IS_E1x(bp)) {
+			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
+				"vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
+				"state(0x%x)\n",
+				sb_data_e2.common.p_func.pf_id,
+				sb_data_e2.common.p_func.vf_id,
+				sb_data_e2.common.p_func.vf_valid,
+				sb_data_e2.common.p_func.vnic_id,
+				sb_data_e2.common.same_igu_sb_1b,
+				sb_data_e2.common.state);
+		} else {
+			pr_cont("pf_id(0x%x)  vf_id(0x%x)  vf_valid(0x%x) "
+				"vnic_id(0x%x)  same_igu_sb_1b(0x%x) "
+				"state(0x%x)\n",
+				sb_data_e1x.common.p_func.pf_id,
+				sb_data_e1x.common.p_func.vf_id,
+				sb_data_e1x.common.p_func.vf_valid,
+				sb_data_e1x.common.p_func.vnic_id,
+				sb_data_e1x.common.same_igu_sb_1b,
+				sb_data_e1x.common.state);
+		}
+
+		/* SB_SMs data */
+		for (j = 0; j < HC_SB_MAX_SM; j++) {
+			pr_cont("SM[%d] __flags (0x%x) "
+			       "igu_sb_id (0x%x)  igu_seg_id(0x%x) "
+			       "time_to_expire (0x%x) "
+			       "timer_value(0x%x)\n", j,
+			       hc_sm_p[j].__flags,
+			       hc_sm_p[j].igu_sb_id,
+			       hc_sm_p[j].igu_seg_id,
+			       hc_sm_p[j].time_to_expire,
+			       hc_sm_p[j].timer_value);
+		}
+
+		/* Indices data */
+		for (j = 0; j < loop; j++) {
+			pr_cont("INDEX[%d] flags (0x%x) "
+					 "timeout (0x%x)\n", j,
+			       hc_index_p[j].flags,
+			       hc_index_p[j].timeout);
+		}
+	}
+
+#ifdef BNX2X_STOP_ON_ERROR
+	/* Rings */
+	/* Rx */
+	for_each_rx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
+		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
+		for (j = start; j != end; j = RX_BD(j + 1)) {
+			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
+			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
+
+			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
+				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+		}
+
+		start = RX_SGE(fp->rx_sge_prod);
+		end = RX_SGE(fp->last_max_sge);
+		for (j = start; j != end; j = RX_SGE(j + 1)) {
+			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
+			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
+
+			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
+				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
+		}
+
+		start = RCQ_BD(fp->rx_comp_cons - 10);
+		end = RCQ_BD(fp->rx_comp_cons + 503);
+		for (j = start; j != end; j = RCQ_BD(j + 1)) {
+			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
+
+			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
+				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
+		}
+	}
+
+	/* Tx */
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		for_each_cos_in_tx_queue(fp, cos) {
+			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+
+			start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
+			end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
+			for (j = start; j != end; j = TX_BD(j + 1)) {
+				struct sw_tx_bd *sw_bd =
+					&txdata->tx_buf_ring[j];
+
+				BNX2X_ERR("fp%d: txdata %d, "
+					  "packet[%x]=[%p,%x]\n",
+					  i, cos, j, sw_bd->skb,
+					  sw_bd->first_bd);
+			}
+
+			start = TX_BD(txdata->tx_bd_cons - 10);
+			end = TX_BD(txdata->tx_bd_cons + 254);
+			for (j = start; j != end; j = TX_BD(j + 1)) {
+				u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
+
+				BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
+					  "[%x:%x:%x:%x]\n",
+					  i, cos, j, tx_bd[0], tx_bd[1],
+					  tx_bd[2], tx_bd[3]);
+			}
+		}
+	}
+#endif
+	bnx2x_fw_dump(bp);
+	bnx2x_mc_assert(bp);
+	BNX2X_ERR("end crash dump -----------------\n");
+}
+
+/*
+ * FLR Support for E2
+ *
+ * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
+ * initialization.
+ */
+#define FLR_WAIT_USEC		10000	/* 10 milliseconds */
+#define FLR_WAIT_INTERAVAL	50	/* usec */
+#define	FLR_POLL_CNT		(FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+
+struct pbf_pN_buf_regs {
+	int pN;
+	u32 init_crd;
+	u32 crd;
+	u32 crd_freed;
+};
+
+struct pbf_pN_cmd_regs {
+	int pN;
+	u32 lines_occup;
+	u32 lines_freed;
+};
+
+static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
+				     struct pbf_pN_buf_regs *regs,
+				     u32 poll_count)
+{
+	u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
+	u32 cur_cnt = poll_count;
+
+	crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
+	crd = crd_start = REG_RD(bp, regs->crd);
+	init_crd = REG_RD(bp, regs->init_crd);
+
+	DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
+	DP(BNX2X_MSG_SP, "CREDIT[%d]      : s:%x\n", regs->pN, crd);
+	DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
+
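+	/* Wait until the credits return to their initial value; SUB_S32()
+	 * keeps the comparison correct if the freed counter wraps.
+	 */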
+	while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
+	       (init_crd - crd_start))) {
+		if (cur_cnt--) {
+			udelay(FLR_WAIT_INTERAVAL);
+			crd = REG_RD(bp, regs->crd);
+			crd_freed = REG_RD(bp, regs->crd_freed);
+		} else {
+			DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
+			   regs->pN);
+			DP(BNX2X_MSG_SP, "CREDIT[%d]      : c:%x\n",
+			   regs->pN, crd);
+			DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
+			   regs->pN, crd_freed);
+			break;
+		}
+	}
+	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
+	   poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
+				     struct pbf_pN_cmd_regs *regs,
+				     u32 poll_count)
+{
+	u32 occup, to_free, freed, freed_start;
+	u32 cur_cnt = poll_count;
+
+	occup = to_free = REG_RD(bp, regs->lines_occup);
+	freed = freed_start = REG_RD(bp, regs->lines_freed);
+
+	DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n", regs->pN, occup);
+	DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
+
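+	/* Poll until every line that was occupied at entry has been freed */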
+	while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
+		if (cur_cnt--) {
+			udelay(FLR_WAIT_INTERAVAL);
+			occup = REG_RD(bp, regs->lines_occup);
+			freed = REG_RD(bp, regs->lines_freed);
+		} else {
+			DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
+			   regs->pN);
+			DP(BNX2X_MSG_SP, "OCCUPANCY[%d]   : s:%x\n",
+			   regs->pN, occup);
+			DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
+			   regs->pN, freed);
+			break;
+		}
+	}
+	DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
+	   poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+}
+
+static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
+				     u32 expected, u32 poll_count)
+{
+	u32 cur_cnt = poll_count;
+	u32 val;
+
+	while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
+		udelay(FLR_WAIT_INTERAVAL);
+
+	return val;
+}
+
+static inline int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
+						  char *msg, u32 poll_cnt)
+{
+	u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
+	if (val != 0) {
+		BNX2X_ERR("%s usage count=%d\n", msg, val);
+		return 1;
+	}
+	return 0;
+}
+
+static u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
+{
+	/* adjust polling timeout */
+	if (CHIP_REV_IS_EMUL(bp))
+		return FLR_POLL_CNT * 2000;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		return FLR_POLL_CNT * 120;
+
+	return FLR_POLL_CNT;
+}
+
+static void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
+{
+	struct pbf_pN_cmd_regs cmd_regs[] = {
+		{0, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_Q0 :
+			PBF_REG_P0_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_Q0 :
+			PBF_REG_P0_TQ_LINES_FREED_CNT},
+		{1, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_Q1 :
+			PBF_REG_P1_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_Q1 :
+			PBF_REG_P1_TQ_LINES_FREED_CNT},
+		{4, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_OCCUPANCY_LB_Q :
+			PBF_REG_P4_TQ_OCCUPANCY,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
+			PBF_REG_P4_TQ_LINES_FREED_CNT}
+	};
+
+	struct pbf_pN_buf_regs buf_regs[] = {
+		{0, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_Q0 :
+			PBF_REG_P0_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_Q0 :
+			PBF_REG_P0_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
+			PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
+		{1, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_Q1 :
+			PBF_REG_P1_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_Q1 :
+			PBF_REG_P1_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
+			PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
+		{4, (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INIT_CRD_LB_Q :
+			PBF_REG_P4_INIT_CRD,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_CREDIT_LB_Q :
+			PBF_REG_P4_CREDIT,
+		    (CHIP_IS_E3B0(bp)) ?
+			PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
+			PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
+	};
+
+	int i;
+
+	/* Verify the command queues are flushed P0, P1, P4 */
+	for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
+		bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
+
+	/* Verify the transmission buffers are flushed P0, P1, P4 */
+	for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
+		bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
+}
+
+#define OP_GEN_PARAM(param) \
+	(((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
+
+#define OP_GEN_TYPE(type) \
+	(((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
+
+#define OP_GEN_AGG_VECT(index) \
+	(((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
+
+
+static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
+					 u32 poll_cnt)
+{
+	struct sdm_op_gen op_gen = {0};
+
+	u32 comp_addr = BAR_CSTRORM_INTMEM +
+			CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
+	int ret = 0;
+
+	if (REG_RD(bp, comp_addr)) {
+		BNX2X_ERR("Cleanup complete is not 0\n");
+		return 1;
+	}
+
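+	/* Build the final-cleanup command for the aggregated interrupt */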
+	op_gen.command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
+	op_gen.command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
+	op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
+	op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
+
+	DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+	REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
+
+	if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
+		BNX2X_ERR("FW final cleanup did not succeed\n");
+		ret = 1;
+	}
+	/* Zero the completion for the next FLR */
+	REG_WR(bp, comp_addr, 0);
+
+	return ret;
+}
+
+static inline u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
+{
+	int pos;
+	u16 status;
+
+	pos = pci_pcie_cap(dev);
+	if (!pos)
+		return false;
+
+	pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
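+	/* The Transactions Pending bit stays set while non-posted requests
+	 * are still outstanding.
+	 */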
+	return status & PCI_EXP_DEVSTA_TRPND;
+}
+
+/* PF FLR specific routines
+*/
+static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
+{
+	/* wait for CFC PF usage-counter to zero (includes all the VFs) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			CFC_REG_NUM_LCIDS_INSIDE_PF,
+			"CFC PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			DORQ_REG_PF_USAGE_CNT,
+			"DQ PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for QM PF usage-counter to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
+			"QM PF usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
+			"Timers VNIC usage counter timed out",
+			poll_cnt))
+		return 1;
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
+			"Timers NUM_SCANS usage counter timed out",
+			poll_cnt))
+		return 1;
+
+	/* Wait for the DMAE PF usage counter to zero */
+	if (bnx2x_flr_clnup_poll_hw_counter(bp,
+			dmae_reg_go_c[INIT_DMAE_C(bp)],
+			"DMAE dommand register timed out",
+			poll_cnt))
+		return 1;
+
+	return 0;
+}
+
+static void bnx2x_hw_enable_status(struct bnx2x *bp)
+{
+	u32 val;
+
+	val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
+	DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, PBF_REG_DISABLE_PF);
+	DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
+
+	val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
+	DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
+
+	val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
+	DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
+	   val);
+}
+
+static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
+{
+	u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
+
+	DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
+
+	/* Re-enable PF target read access */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+	/* Poll HW usage counters */
+	if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
+		return -EBUSY;
+
+	/* Zero the igu 'trailing edge' and 'leading edge' */
+
+	/* Send the FW cleanup command */
+	if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
+		return -EBUSY;
+
+	/* ATC cleanup */
+
+	/* Verify TX hw is flushed */
+	bnx2x_tx_hw_flushed(bp, poll_cnt);
+
+	/* Wait 100ms (not adjusted according to platform) */
+	msleep(100);
+
+	/* Verify no pending pci transactions */
+	if (bnx2x_is_pcie_pending(bp->pdev))
+		BNX2X_ERR("PCIE Transactions still pending\n");
+
+	/* Debug */
+	bnx2x_hw_enable_status(bp);
+
+	/*
+	 * Master enable - Due to WB DMAE writes performed before this
+	 * register is re-initialized as part of the regular function init
+	 */
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+	return 0;
+}
+
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+	u32 val = REG_RD(bp, addr);
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+	if (msix) {
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0);
+		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else if (msi) {
+		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else {
+		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			HC_CONFIG_0_REG_INT_LINE_EN_0 |
+			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+		if (!CHIP_IS_E1(bp)) {
+			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+			   val, port, addr);
+
+			REG_WR(bp, addr, val);
+
+			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
+		}
+	}
+
+	if (CHIP_IS_E1(bp))
+		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
+
+	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
+	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+	REG_WR(bp, addr, val);
+	/*
+	 * Ensure that HC_CONFIG is written before leading/trailing edge config
+	 */
+	mmiowb();
+	barrier();
+
+	if (!CHIP_IS_E1(bp)) {
+		/* init leading/trailing edge */
+		if (IS_MF(bp)) {
+			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+			if (bp->port.pmf)
+				/* enable nig and gpio3 attention */
+				val |= 0x1100;
+		} else
+			val = 0xffff;
+
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+	}
+
+	/* Make sure that interrupts are indeed enabled from here on */
+	mmiowb();
+}
+
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
+{
+	u32 val;
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
+
+	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+	if (msix) {
+		val &= ~(IGU_PF_CONF_INT_LINE_EN |
+			 IGU_PF_CONF_SINGLE_ISR_EN);
+		val |= (IGU_PF_CONF_FUNC_EN |
+			IGU_PF_CONF_MSI_MSIX_EN |
+			IGU_PF_CONF_ATTN_BIT_EN);
+	} else if (msi) {
+		val &= ~IGU_PF_CONF_INT_LINE_EN;
+		val |= (IGU_PF_CONF_FUNC_EN |
+			IGU_PF_CONF_MSI_MSIX_EN |
+			IGU_PF_CONF_ATTN_BIT_EN |
+			IGU_PF_CONF_SINGLE_ISR_EN);
+	} else {
+		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
+		val |= (IGU_PF_CONF_FUNC_EN |
+			IGU_PF_CONF_INT_LINE_EN |
+			IGU_PF_CONF_ATTN_BIT_EN |
+			IGU_PF_CONF_SINGLE_ISR_EN);
+	}
+
+	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
+	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+
+	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+
+	barrier();
+
+	/* init leading/trailing edge */
+	if (IS_MF(bp)) {
+		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
+		if (bp->port.pmf)
+			/* enable nig and gpio3 attention */
+			val |= 0x1100;
+	} else
+		val = 0xffff;
+
+	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+
+	/* Make sure that interrupts are indeed enabled from here on */
+	mmiowb();
+}
+
+void bnx2x_int_enable(struct bnx2x *bp)
+{
+	if (bp->common.int_block == INT_BLOCK_HC)
+		bnx2x_hc_int_enable(bp);
+	else
+		bnx2x_igu_int_enable(bp);
+}
+
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
+	u32 val = REG_RD(bp, addr);
+
+	/*
+	 * In E1 we must use only the PCI configuration space to disable the
+	 * MSI/MSI-X capability; it is forbidden to clear
+	 * IGU_PF_CONF_MSI_MSIX_EN in the HC block.
+	 */
+	if (CHIP_IS_E1(bp)) {
+		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on, use the
+		 * mask register to prevent the HC from sending interrupts
+		 * after we exit this function.
+		 */
+		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
+
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
+			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+	} else
+		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
+			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
+			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
+			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
+
+	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+	   val, port, addr);
+
+	/* flush all outstanding writes */
+	mmiowb();
+
+	REG_WR(bp, addr, val);
+	if (REG_RD(bp, addr) != val)
+		BNX2X_ERR("BUG! proper val not read from HC!\n");
+}
+
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+
+	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
+		 IGU_PF_CONF_INT_LINE_EN |
+		 IGU_PF_CONF_ATTN_BIT_EN);
+
+	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+
+	/* flush all outstanding writes */
+	mmiowb();
+
+	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
+		BNX2X_ERR("BUG! proper val not read from IGU!\n");
+}
+
+void bnx2x_int_disable(struct bnx2x *bp)
+{
+	if (bp->common.int_block == INT_BLOCK_HC)
+		bnx2x_hc_int_disable(bp);
+	else
+		bnx2x_igu_int_disable(bp);
+}
+
+void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
+{
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+	int i, offset;
+
+	if (disable_hw)
+		/* prevent the HW from sending interrupts */
+		bnx2x_int_disable(bp);
+
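+	/* MSI-X vector layout: slot 0 is the default (slow-path) status
+	 * block, an optional CNIC slot follows, and then one vector per
+	 * ethernet queue; the sync below walks that same layout.
+	 */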
+	/* make sure all ISRs are done */
+	if (msix) {
+		synchronize_irq(bp->msix_table[0].vector);
+		offset = 1;
+#ifdef BCM_CNIC
+		offset++;
+#endif
+		for_each_eth_queue(bp, i)
+			synchronize_irq(bp->msix_table[offset++].vector);
+	} else
+		synchronize_irq(bp->pdev->irq);
+
+	/* make sure sp_task is not running */
+	cancel_delayed_work(&bp->sp_task);
+	cancel_delayed_work(&bp->period_task);
+	flush_workqueue(bnx2x_wq);
+}
+
+/* fast path */
+
+/*
+ * General service functions
+ */
+
+/* Return true if the lock was acquired successfully */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return false;
+	}
+
+	if (func <= 5)
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	else
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+	/* Try to acquire the lock */
+	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit)
+		return true;
+
+	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+	return false;
+}
+
+/**
+ * bnx2x_get_leader_lock_resource - get the recovery leader resource id
+ *
+ * @bp:	driver handle
+ *
+ * Returns the recovery leader resource id according to the engine this
+ * function belongs to. Currently only 2 engines are supported.
+ */
+static inline int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
+{
+	if (BP_PATH(bp))
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
+	else
+		return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
+}
+
+/**
+ * bnx2x_trylock_leader_lock - try to acquire a leader lock.
+ *
+ * @bp: driver handle
+ *
+ * Tries to acquire a leader lock for the current engine.
+ */
+static inline bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
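+
+/*
+ * Usage sketch (hypothetical caller): the function that wins this lock
+ * becomes the recovery leader for its engine; everyone else backs off.
+ *
+ *	if (!bnx2x_trylock_leader_lock(bp))
+ *		return;
+ */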
+
+#ifdef BCM_CNIC
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
+#endif
+
+void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
+{
+	struct bnx2x *bp = fp->bp;
+	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
+	enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
+	struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj;
+
+	DP(BNX2X_MSG_SP,
+	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
+	   fp->index, cid, command, bp->state,
+	   rr_cqe->ramrod_cqe.ramrod_type);
+
+	switch (command) {
+	case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
+		DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
+		drv_cmd = BNX2X_Q_CMD_UPDATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
+		DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_HALT):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_HALT;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_TERMINATE):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_TERMINATE;
+		break;
+
+	case (RAMROD_CMD_ID_ETH_EMPTY):
+		DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
+		drv_cmd = BNX2X_Q_CMD_EMPTY;
+		break;
+
+	default:
+		BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
+			  command, fp->index);
+		return;
+	}
+
+	if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
+	    q_obj->complete_cmd(bp, q_obj, drv_cmd))
+		/* q_obj->complete_cmd() failure means that this was
+		 * an unexpected completion.
+		 *
+		 * In this case we don't want to increase bp->cq_spq_left
+		 * because apparently we didn't send this command in the
+		 * first place.
+		 */
+#ifdef BNX2X_STOP_ON_ERROR
+		bnx2x_panic();
+#else
+		return;
+#endif
+
+	smp_mb__before_atomic_inc();
+	atomic_inc(&bp->cq_spq_left);
+	/* push the change in bp->cq_spq_left towards memory */
+	smp_mb__after_atomic_inc();
+
+	DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
+
+	return;
+}
+
+void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
+{
+	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
+
+	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
+				 start);
+}
+
+irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
+{
+	struct bnx2x *bp = netdev_priv(dev_instance);
+	u16 status = bnx2x_ack_int(bp);
+	u16 mask;
+	int i;
+	u8 cos;
+
+	/* Return here if interrupt is shared and it's not for us */
+	if (unlikely(status == 0)) {
+		DP(NETIF_MSG_INTR, "not our interrupt!\n");
+		return IRQ_NONE;
+	}
+	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
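+	/* The status word is a bitmap: bit 0 is the slow-path indication,
+	 * the next bit belongs to CNIC (when compiled in) and the remaining
+	 * bits map onto the ethernet fastpath queues, as walked below.
+	 */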
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		mask = 0x2 << (fp->index + CNIC_PRESENT);
+		if (status & mask) {
+			/* Handle Rx or Tx according to SB id */
+			prefetch(fp->rx_cons_sb);
+			for_each_cos_in_tx_queue(fp, cos)
+				prefetch(fp->txdata[cos].tx_cons_sb);
+			prefetch(&fp->sb_running_index[SM_RX_ID]);
+			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
+			status &= ~mask;
+		}
+	}
+
+#ifdef BCM_CNIC
+	mask = 0x2;
+	if (status & (mask | 0x1)) {
+		struct cnic_ops *c_ops = NULL;
+
+		if (likely(bp->state == BNX2X_STATE_OPEN)) {
+			rcu_read_lock();
+			c_ops = rcu_dereference(bp->cnic_ops);
+			if (c_ops)
+				c_ops->cnic_handler(bp->cnic_data, NULL);
+			rcu_read_unlock();
+		}
+
+		status &= ~mask;
+	}
+#endif
+
+	if (unlikely(status & 0x1)) {
+		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+		status &= ~0x1;
+		if (!status)
+			return IRQ_HANDLED;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
+
+	return IRQ_HANDLED;
+}
+
+/* Link */
+
+/*
+ * General service functions
+ */
+
+int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+	int cnt;
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
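+	/* Each function owns an 8-byte lock-control slot: functions 0-5
+	 * follow MISC_REG_DRIVER_CONTROL_1, functions 6-7 follow
+	 * MISC_REG_DRIVER_CONTROL_7.
+	 */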
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
+	/* Validating that the resource is not already taken */
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit) {
+		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+		   lock_status, resource_bit);
+		return -EEXIST;
+	}
+
+	/* Try for 5 seconds, polling every 5 ms */
+	for (cnt = 0; cnt < 1000; cnt++) {
+		/* Try to acquire the lock */
+		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+		lock_status = REG_RD(bp, hw_lock_control_reg);
+		if (lock_status & resource_bit)
+			return 0;
+
+		msleep(5);
+	}
+	DP(NETIF_MSG_HW, "Timeout\n");
+	return -EAGAIN;
+}
+
+int bnx2x_release_leader_lock(struct bnx2x *bp)
+{
+	return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
+}
+
+int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5) {
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	} else {
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+	}
+
+	/* Validating that the resource is currently taken */
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (!(lock_status & resource_bit)) {
+		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
+		   lock_status, resource_bit);
+		return -EFAULT;
+	}
+
+	REG_WR(bp, hw_lock_control_reg, resource_bit);
+	return 0;
+}
+
+int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
+{
+	/* The GPIO should be swapped if both the port-swap register and
+	 * the strap override are set; the XOR below then flips the port.
+	 */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+	int value;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	/* read GPIO value */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+
+	/* get the requested pin value */
+	if ((gpio_reg & gpio_mask) == gpio_mask)
+		value = 1;
+	else
+		value = 0;
+
+	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
+
+	return value;
+}
+
+int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO and mask except the float bits */
+	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+		   gpio_num, gpio_shift);
+		/* clear FLOAT and set CLR */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+		   gpio_num, gpio_shift);
+		/* clear FLOAT and set SET */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+		   gpio_num, gpio_shift);
+		/* set FLOAT */
+		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return 0;
+}
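+
+/*
+ * Illustrative call (hypothetical values): drive GPIO 0 low on the
+ * current port, e.g. as one step of resetting an external PHY:
+ *
+ *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
+ *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
+ */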
+
+int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
+{
+	u32 gpio_reg = 0;
+	int rc = 0;
+
+	/* Any port swapping should be handled by caller. */
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO and mask except the float bits */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
+	gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
+		/* set CLR */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
+		/* set SET */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
+		/* set FLOAT */
+		gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
+		break;
+
+	default:
+		BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
+		rc = -EINVAL;
+		break;
+	}
+
+	if (rc == 0)
+		REG_WR(bp, MISC_REG_GPIO, gpio_reg);
+
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return rc;
+}
+
+int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
+{
+	/* The GPIO should be swapped if swap register is set and active */
+	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
+			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
+	int gpio_shift = gpio_num +
+			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+	u32 gpio_mask = (1 << gpio_shift);
+	u32 gpio_reg;
+
+	if (gpio_num > MISC_REGISTERS_GPIO_3) {
+		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+	/* read GPIO int */
+	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
+
+	switch (mode) {
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
+		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
+				   "output low\n", gpio_num, gpio_shift);
+		/* clear SET and set CLR */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
+		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
+				   "output high\n", gpio_num, gpio_shift);
+		/* clear CLR and set SET */
+		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
+		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
+
+	return 0;
+}
+
+static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
+{
+	u32 spio_mask = (1 << spio_num);
+	u32 spio_reg;
+
+	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
+	    (spio_num > MISC_REGISTERS_SPIO_7)) {
+		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
+		return -EINVAL;
+	}
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+	/* read SPIO and mask except the float bits */
+	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
+
+	switch (mode) {
+	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
+		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+		/* clear FLOAT and set CLR */
+		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
+		break;
+
+	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
+		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+		/* clear FLOAT and set SET */
+		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
+		break;
+
+	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
+		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+		/* set FLOAT */
+		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
+		break;
+
+	default:
+		break;
+	}
+
+	REG_WR(bp, MISC_REG_SPIO, spio_reg);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
+
+	return 0;
+}
+
+void bnx2x_calc_fc_adv(struct bnx2x *bp)
+{
+	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
+	switch (bp->link_vars.ieee_fc &
+		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
+		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+						   ADVERTISED_Pause);
+		break;
+
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
+		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
+						  ADVERTISED_Pause);
+		break;
+
+	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
+		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
+		break;
+
+	default:
+		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
+						   ADVERTISED_Pause);
+		break;
+	}
+}
+
+u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
+{
+	if (!BP_NOMCP(bp)) {
+		u8 rc;
+		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
+		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
+		/*
+		 * Initialize link parameters structure variables
+		 * It is recommended to turn off RX FC for jumbo frames
+		 * for better performance
+		 */
+		if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
+			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
+		else
+			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
+
+		bnx2x_acquire_phy_lock(bp);
+
+		if (load_mode == LOAD_DIAG) {
+			struct link_params *lp = &bp->link_params;
+			lp->loopback_mode = LOOPBACK_XGXS;
+			/* do PHY loopback at 10G speed, if possible */
+			if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
+				if (lp->speed_cap_mask[cfx_idx] &
+				    PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
+					lp->req_line_speed[cfx_idx] =
+					SPEED_10000;
+				else
+					lp->req_line_speed[cfx_idx] =
+					SPEED_1000;
+			}
+		}
+
+		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+
+		bnx2x_release_phy_lock(bp);
+
+		bnx2x_calc_fc_adv(bp);
+
+		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+			bnx2x_link_report(bp);
+		} else
+			queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
+		return rc;
+	}
+	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+	return -EINVAL;
+}
+
+void bnx2x_link_set(struct bnx2x *bp)
+{
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+
+		bnx2x_calc_fc_adv(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not set link\n");
+}
+
+static void bnx2x__link_reset(struct bnx2x *bp)
+{
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not reset link\n");
+}
+
+u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
+{
+	u8 rc = 0;
+
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
+				     is_serdes);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not test link\n");
+
+	return rc;
+}
+
+static void bnx2x_init_port_minmax(struct bnx2x *bp)
+{
+	u32 r_param = bp->link_vars.line_speed / 8;
+	u32 fair_periodic_timeout_usec;
+	u32 t_fair;
+
+	memset(&(bp->cmng.rs_vars), 0,
+	       sizeof(struct rate_shaping_vars_per_port));
+	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
+
+	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
+	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
+
+	/* this is the threshold below which no timer arming will occur;
+	   the 1.25 coefficient makes the threshold a little bigger than
+	   the real time, to compensate for timer inaccuracy */
+	bp->cmng.rs_vars.rs_threshold =
+				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
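+	/* e.g. on a 10G link: r_param = 10000/8 = 1250 bytes/usec, so the
+	 * threshold comes out to 1.25 * RS_PERIODIC_TIMEOUT_USEC * 1250
+	 * bytes.
+	 */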
+
+	/* resolution of fairness timer */
+	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
+	/* for 10G it is 1000usec. for 1G it is 10000usec. */
+	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
+
+	/* this is the threshold below which we won't arm the timer anymore */
+	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
+
+	/* we multiply by 1e3/8 to get bytes/msec.
+	   We don't want the credits to exceed the credit
+	   corresponding to t_fair*FAIR_MEM (the algorithm resolution) */
+	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
+	/* since each tick is 4 usec */
+	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
+}
+
+/* Calculates the sum of vn_min_rates.
+   It's needed for further normalizing of the min_rates.
+   Sets bp->vn_weight_sum to:
+     the sum of vn_min_rates,
+       or
+     0 - if all the min_rates are 0.
+     In the latter case the fairness algorithm should be deactivated.
+     If not all min_rates are zero, those that are zero will be set to 1.
+ */
+static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
+{
+	int all_zero = 1;
+	int vn;
+
+	bp->vn_weight_sum = 0;
+	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+		u32 vn_cfg = bp->mf_config[vn];
+		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+
+		/* Skip hidden vns */
+		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
+			continue;
+
+		/* If min rate is zero - set it to 1 */
+		if (!vn_min_rate)
+			vn_min_rate = DEF_MIN_RATE;
+		else
+			all_zero = 0;
+
+		bp->vn_weight_sum += vn_min_rate;
+	}
+
+	/* if ETS or all min rates are zeros - disable fairness */
+	if (BNX2X_IS_ETS_ENABLED(bp)) {
+		bp->cmng.flags.cmng_enables &=
+					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
+	} else if (all_zero) {
+		bp->cmng.flags.cmng_enables &=
+					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+		DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
+		   " fairness will be disabled\n");
+	} else
+		bp->cmng.flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
+}
+
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
+{
+	struct rate_shaping_vars_per_vn m_rs_vn;
+	struct fairness_vars_per_vn m_fair_vn;
+	u32 vn_cfg = bp->mf_config[vn];
+	int func = 2*vn + BP_PORT(bp);
+	u16 vn_min_rate, vn_max_rate;
+	int i;
+
+	/* If function is hidden - set min and max to zeroes */
+	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
+		vn_min_rate = 0;
+		vn_max_rate = 0;
+
+	} else {
+		u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
+
+		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
+				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
+		/* If fairness is enabled (not all min rates are zeroes) and
+		   if current min rate is zero - set it to 1.
+		   This is a requirement of the algorithm. */
+		if (bp->vn_weight_sum && (vn_min_rate == 0))
+			vn_min_rate = DEF_MIN_RATE;
+
+		if (IS_MF_SI(bp))
+			/* maxCfg is in percent of the link speed */
+			vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
+		else
+			/* maxCfg is absolute in 100Mb units */
+			vn_max_rate = maxCfg * 100;
+	}
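+	/* Worked example (illustrative values): with maxCfg == 30 on a 1G
+	 * link, SI mode gives vn_max_rate = 1000 * 30 / 100 = 300 Mbps,
+	 * while SD mode gives 30 * 100 = 3000 Mbps (absolute 100Mb units).
+	 */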
+
+	DP(NETIF_MSG_IFUP,
+	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
+	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
+
+	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
+	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
+
+	/* global vn counter - maximal Mbps for this vn */
+	m_rs_vn.vn_counter.rate = vn_max_rate;
+
+	/* quota - number of bytes transmitted in this period */
+	m_rs_vn.vn_counter.quota =
+				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
+
+	if (bp->vn_weight_sum) {
+		/* credit for each period of the fairness algorithm:
+		   number of bytes in T_FAIR (the vn's share of the port rate).
+		   vn_weight_sum should not be larger than 10000, thus
+		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
+		   than zero */
+		m_fair_vn.vn_credit_delta =
+			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+						   (8 * bp->vn_weight_sum))),
+			      (bp->cmng.fair_vars.fair_threshold +
+							MIN_ABOVE_THRESH));
+		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
+		   m_fair_vn.vn_credit_delta);
+	}
+
+	/* Store it to internal memory */
+	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
+		REG_WR(bp, BAR_XSTRORM_INTMEM +
+		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
+		       ((u32 *)(&m_rs_vn))[i]);
+
+	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
+		REG_WR(bp, BAR_XSTRORM_INTMEM +
+		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
+		       ((u32 *)(&m_fair_vn))[i]);
+}
+
+static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
+{
+	if (CHIP_REV_IS_SLOW(bp))
+		return CMNG_FNS_NONE;
+	if (IS_MF(bp))
+		return CMNG_FNS_MINMAX;
+
+	return CMNG_FNS_NONE;
+}
+
+void bnx2x_read_mf_cfg(struct bnx2x *bp)
+{
+	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
+
+	if (BP_NOMCP(bp))
+		return; /* what should be the default value in this case? */
+
+	/* For 2 port configuration the absolute function number formula
+	 * is:
+	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
+	 *
+	 *      and there are 4 functions per port
+	 *
+	 * For 4 port configuration it is
+	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+	 *
+	 *      and there are 2 functions per port
+	 */
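+	/* e.g. in 2-port mode (n == 1): vn 1 on port 1, path 0 gives
+	 * abs_func = 1 * (2 * 1 + 1) + 0 = 3.
+	 */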
+	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+		if (func >= E1H_FUNC_MAX)
+			break;
+
+		bp->mf_config[vn] =
+			MF_CFG_RD(bp, func_mf_config[func].config);
+	}
+}
+
+static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
+{
+	if (cmng_type == CMNG_FNS_MINMAX) {
+		int vn;
+
+		/* clear cmng_enables */
+		bp->cmng.flags.cmng_enables = 0;
+
+		/* read mf conf from shmem */
+		if (read_cfg)
+			bnx2x_read_mf_cfg(bp);
+
+		/* Init rate shaping and fairness contexts */
+		bnx2x_init_port_minmax(bp);
+
+		/* vn_weight_sum and enable fairness if not 0 */
+		bnx2x_calc_vn_weight_sum(bp);
+
+		/* calculate and set min-max rate for each vn */
+		if (bp->port.pmf)
+			for (vn = VN_0; vn < E1HVN_MAX; vn++)
+				bnx2x_init_vn_minmax(bp, vn);
+
+		/* always enable rate shaping and fairness */
+		bp->cmng.flags.cmng_enables |=
+					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
+		if (!bp->vn_weight_sum)
+			DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
+				   " fairness will be disabled\n");
+		return;
+	}
+
+	/* rate shaping and fairness are disabled */
+	DP(NETIF_MSG_IFUP,
+	   "rate shaping and fairness are disabled\n");
+}
+
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func;
+	int vn;
+
+	/* Set the attention towards other drivers on the same port */
+	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
+		if (vn == BP_E1HVN(bp))
+			continue;
+
+		func = ((vn << 1) | port);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+	}
+}
+
+/* This function is called upon link interrupt */
+static void bnx2x_link_attn(struct bnx2x *bp)
+{
+	/* Make sure that we are synced with the current statistics */
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	bnx2x_link_update(&bp->link_params, &bp->link_vars);
+
+	if (bp->link_vars.link_up) {
+
+		/* dropless flow control */
+		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
+			int port = BP_PORT(bp);
+			u32 pause_enabled = 0;
+
+			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+				pause_enabled = 1;
+
+			REG_WR(bp, BAR_USTRORM_INTMEM +
+			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
+			       pause_enabled);
+		}
+
+		if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
+			struct host_port_stats *pstats;
+
+			pstats = bnx2x_sp(bp, port_stats);
+			/* reset old mac stats */
+			memset(&(pstats->mac_stx[0]), 0,
+			       sizeof(struct mac_stx));
+		}
+		if (bp->state == BNX2X_STATE_OPEN)
+			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+	}
+
+	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
+		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
+
+		if (cmng_fns != CMNG_FNS_NONE) {
+			bnx2x_cmng_fns_init(bp, false, cmng_fns);
+			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+		} else
+			/* rate shaping and fairness are disabled */
+			DP(NETIF_MSG_IFUP,
+			   "single function mode without fairness\n");
+	}
+
+	__bnx2x_link_report(bp);
+
+	if (IS_MF(bp))
+		bnx2x_link_sync_notify(bp);
+}
+
+void bnx2x__link_status_update(struct bnx2x *bp)
+{
+	if (bp->state != BNX2X_STATE_OPEN)
+		return;
+
+	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
+
+	if (bp->link_vars.link_up)
+		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
+	else
+		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	/* indicate link status */
+	bnx2x_link_report(bp);
+}
+
+static void bnx2x_pmf_update(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 val;
+
+	bp->port.pmf = 1;
+	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+
+	/*
+	 * We need the mb() to ensure the ordering between the writing to
+	 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
+	 */
+	smp_mb();
+
+	/* queue a periodic task */
+	queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
+
+	bnx2x_dcbx_pmf_update(bp);
+
+	/* enable nig attention */
+	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
+	} else if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
+		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
+	}
+
+	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+}
+
+/* end of Link */
+
+/* slow path */
+
+/*
+ * General service functions
+ */
+
+/* send the MCP a request, block until there is a reply */
+u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
+{
+	int mb_idx = BP_FW_MB_IDX(bp);
+	u32 seq;
+	u32 rc = 0;
+	u32 cnt = 1;
+	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
+
+	mutex_lock(&bp->fw_mb_mutex);
+	seq = ++bp->fw_seq;
+	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
+	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
+
+	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
+			(command | seq), param);
+
+	do {
+		/* let the FW do its magic ... */
+		msleep(delay);
+
+		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
+
+		/* Give the FW up to 5 second (500*10ms) */
+	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
+
+	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
+	   cnt*delay, rc, seq);
+
+	/* is this a reply to our command? */
+	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
+		rc &= FW_MSG_CODE_MASK;
+	else {
+		/* FW BUG! */
+		BNX2X_ERR("FW failed to respond!\n");
+		bnx2x_fw_dump(bp);
+		rc = 0;
+	}
+	mutex_unlock(&bp->fw_mb_mutex);
+
+	return rc;
+}
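+
+/*
+ * Typical caller pattern (hypothetical): an MCP request goes through
+ * bnx2x_fw_command(), and a zero return means the MCP never replied:
+ *
+ *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+ *	if (!rc)
+ *		BNX2X_ERR("MCP did not ack the request\n");
+ */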
+
+static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
+{
+#ifdef BCM_CNIC
+	/* Statistics are not supported for CNIC Clients at the moment */
+	if (IS_FCOE_FP(fp))
+		return false;
+#endif
+	return true;
+}
+
+void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
+{
+	if (CHIP_IS_E1x(bp)) {
+		struct tstorm_eth_function_common_config tcfg = {0};
+
+		storm_memset_func_cfg(bp, &tcfg, p->func_id);
+	}
+
+	/* Enable the function in the FW */
+	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
+	storm_memset_func_en(bp, p->func_id, 1);
+
+	/* spq */
+	if (p->func_flgs & FUNC_FLG_SPQ) {
+		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
+		REG_WR(bp, XSEM_REG_FAST_MEMORY +
+		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
+	}
+}
+
+/**
+ * bnx2x_get_common_flags - Return common flags
+ *
+ * @bp		device handle
+ * @fp		queue handle
+ * @zero_stats	TRUE if statistics zeroing is needed
+ *
+ * Return the flags that are common to the Tx-only and the regular connections.
+ */
+static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
+						   struct bnx2x_fastpath *fp,
+						   bool zero_stats)
+{
+	unsigned long flags = 0;
+
+	/* PF driver will always initialize the Queue to an ACTIVE state */
+	__set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
+
+	/* tx only connections collect statistics (on the same index as the
+	 *  parent connection). The statistics are zeroed when the parent
+	 *  connection is initialized.
+	 */
+	if (stat_counter_valid(bp, fp)) {
+		__set_bit(BNX2X_Q_FLG_STATS, &flags);
+		if (zero_stats)
+			__set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+	}
+
+	return flags;
+}
+
+static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
+					      struct bnx2x_fastpath *fp,
+					      bool leading)
+{
+	unsigned long flags = 0;
+
+	/* calculate other queue flags */
+	if (IS_MF_SD(bp))
+		__set_bit(BNX2X_Q_FLG_OV, &flags);
+
+	if (IS_FCOE_FP(fp))
+		__set_bit(BNX2X_Q_FLG_FCOE, &flags);
+
+	if (!fp->disable_tpa) {
+		__set_bit(BNX2X_Q_FLG_TPA, &flags);
+		__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+	}
+
+	if (leading) {
+		__set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
+		__set_bit(BNX2X_Q_FLG_MCAST, &flags);
+	}
+
+	/* Always set HW VLAN stripping */
+	__set_bit(BNX2X_Q_FLG_VLAN, &flags);
+
+	return flags | bnx2x_get_common_flags(bp, fp, true);
+}
+
+static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
+	u8 cos)
+{
+	gen_init->stat_id = bnx2x_stats_id(fp);
+	gen_init->spcl_id = fp->cl_id;
+
+	/* Always use mini-jumbo MTU for FCoE L2 ring */
+	if (IS_FCOE_FP(fp))
+		gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
+	else
+		gen_init->mtu = bp->dev->mtu;
+
+	gen_init->cos = cos;
+}
+
+static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
+	struct bnx2x_rxq_setup_params *rxq_init)
+{
+	u8 max_sge = 0;
+	u16 sge_sz = 0;
+	u16 tpa_agg_size = 0;
+
+	if (!fp->disable_tpa) {
+		pause->sge_th_hi = 250;
+		pause->sge_th_lo = 150;
+		tpa_agg_size = min_t(u32,
+			(min_t(u32, 8, MAX_SKB_FRAGS) *
+			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
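+		/* round the MTU up to whole SGE pages, then convert the
+		 * result into the number of SGEs needed per packet
+		 */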
+		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
+			SGE_PAGE_SHIFT;
+		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
+			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
+		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
+				    0xffff);
+	}
+
+	/* pause - not for e1 */
+	if (!CHIP_IS_E1(bp)) {
+		pause->bd_th_hi = 350;
+		pause->bd_th_lo = 250;
+		pause->rcq_th_hi = 350;
+		pause->rcq_th_lo = 250;
+
+		pause->pri_map = 1;
+	}
+
+	/* rxq setup */
+	rxq_init->dscr_map = fp->rx_desc_mapping;
+	rxq_init->sge_map = fp->rx_sge_mapping;
+	rxq_init->rcq_map = fp->rx_comp_mapping;
+	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
+
+	/* This should be the maximum number of data bytes that may be
+	 * placed on the BD (not including padding).
+	 */
+	rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
+		IP_HEADER_ALIGNMENT_PADDING;
+
+	rxq_init->cl_qzone_id = fp->cl_qzone_id;
+	rxq_init->tpa_agg_sz = tpa_agg_size;
+	rxq_init->sge_buf_sz = sge_sz;
+	rxq_init->max_sges_pkt = max_sge;
+	rxq_init->rss_engine_id = BP_FUNC(bp);
+
+	/* Maximum number of simultaneous TPA aggregations for this queue.
+	 *
+	 * For PF Clients it should be the maximum available number.
+	 * VF driver(s) may want to define it to a smaller value.
+	 */
+	rxq_init->max_tpa_queues =
+		(CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
+		ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
+
+	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
+	rxq_init->fw_sb_id = fp->fw_sb_id;
+
+	if (IS_FCOE_FP(fp))
+		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
+	else
+		rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+}
+
+static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
+	u8 cos)
+{
+	txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping;
+	txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
+	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
+	txq_init->fw_sb_id = fp->fw_sb_id;
+
+	/*
+	 * set the TSS leading client id for Tx classification ==
+	 * leading RSS client id
+	 */
+	txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
+
+	if (IS_FCOE_FP(fp)) {
+		txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
+		txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
+	}
+}
+
+static void bnx2x_pf_init(struct bnx2x *bp)
+{
+	struct bnx2x_func_init_params func_init = {0};
+	struct event_ring_data eq_data = { {0} };
+	u16 flags;
+
+	if (!CHIP_IS_E1x(bp)) {
+		/* reset IGU PF statistics: MSIX + ATTN */
+		/* PF */
+		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+			   (CHIP_MODE_IS_4_PORT(bp) ?
+				BP_FUNC(bp) : BP_VN(bp))*4, 0);
+		/* ATTN */
+		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
+			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
+			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
+			   (CHIP_MODE_IS_4_PORT(bp) ?
+				BP_FUNC(bp) : BP_VN(bp))*4, 0);
+	}
+
+	/* function setup flags */
+	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
+
+	/* This flag is relevant for E1x only.
+	 * E2 doesn't have a TPA configuration in a function level.
+	 */
+	flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+
+	func_init.func_flgs = flags;
+	func_init.pf_id = BP_FUNC(bp);
+	func_init.func_id = BP_FUNC(bp);
+	func_init.spq_map = bp->spq_mapping;
+	func_init.spq_prod = bp->spq_prod_idx;
+
+	bnx2x_func_init(bp, &func_init);
+
+	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
+
+	/*
+	 * Congestion management values depend on the link rate.
+	 * There is no active link yet, so the initial link rate is set to
+	 * 10 Gbps. When the link comes up, the congestion management
+	 * values are re-calculated according to the actual link rate.
+	 */
+	bp->link_vars.line_speed = SPEED_10000;
+	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
+
+	/* Only the PMF sets the HW */
+	if (bp->port.pmf)
+		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+
+	/* init Event Queue */
+	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
+	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
+	eq_data.producer = bp->eq_prod;
+	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
+	eq_data.sb_id = DEF_SB_ID;
+	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
+}
+
+static void bnx2x_e1h_disable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	bnx2x_tx_disable(bp);
+
+	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+}
+
+static void bnx2x_e1h_enable(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+
+	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+
+	/* Only the Tx queues need to be re-enabled */
+	netif_tx_wake_all_queues(bp->dev);
+
+	/*
+	 * Do not call netif_carrier_on here - it will be called by the
+	 * link-state check if the link is up
+	 */
+}
+
+/* called due to MCP event (on pmf):
+ *	reread new bandwidth configuration
+ *	configure FW
+ *	notify other functions about the change
+ */
+static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+	if (bp->link_vars.link_up) {
+		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+		bnx2x_link_sync_notify(bp);
+	}
+	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+	bnx2x_config_mf_bw(bp);
+	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
+static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
+{
+	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
+
+	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
+
+		/*
+		 * This is the only place besides the function initialization
+		 * where the bp->flags can change so it is done without any
+		 * locks
+		 */
+		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
+			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+			bp->flags |= MF_FUNC_DIS;
+
+			bnx2x_e1h_disable(bp);
+		} else {
+			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+			bp->flags &= ~MF_FUNC_DIS;
+
+			bnx2x_e1h_enable(bp);
+		}
+		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
+	}
+	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
+		bnx2x_config_mf_bw(bp);
+		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
+	}
+
+	/* Report results to MCP */
+	if (dcc_event)
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
+	else
+		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
+}
+
+/* must be called under the spq lock */
+static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
+{
+	struct eth_spe *next_spe = bp->spq_prod_bd;
+
+	if (bp->spq_prod_bd == bp->spq_last_bd) {
+		bp->spq_prod_bd = bp->spq;
+		bp->spq_prod_idx = 0;
+		DP(NETIF_MSG_TIMER, "end of spq\n");
+	} else {
+		bp->spq_prod_bd++;
+		bp->spq_prod_idx++;
+	}
+	return next_spe;
+}
+
+/* must be called under the spq lock */
+static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+
+	/*
+	 * Make sure that BD data is updated before writing the producer:
+	 * BD data is written to the memory, the producer is read from the
+	 * memory, thus we need a full memory barrier to ensure the ordering.
+	 */
+	mb();
+
+	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
+		 bp->spq_prod_idx);
+	mmiowb();
+}
+
+/**
+ * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
+ *
+ * @cmd:	command to check
+ * @cmd_type:	command type
+ */
+static inline bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
+{
+	return (cmd_type == NONE_CONNECTION_TYPE) ||
+	       (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
+	       (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
+	       (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
+	       (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
+	       (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
+	       (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE);
+}
+
+/**
+ * bnx2x_sp_post - place a single command on an SP ring
+ *
+ * @bp:		driver handle
+ * @command:	command to place (e.g. SETUP, FILTER_RULES, etc.)
+ * @cid:	SW CID the command is related to
+ * @data_hi:	command private data address (high 32 bits)
+ * @data_lo:	command private data address (low 32 bits)
+ * @cmd_type:	command type (e.g. NONE, ETH)
+ *
+ * SP data is handled as if it's always an address pair, thus data fields are
+ * not swapped to little endian in upper functions. Instead this function swaps
+ * data as if it's two u32 fields.
+ */
+int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
+		  u32 data_hi, u32 data_lo, int cmd_type)
+{
+	struct eth_spe *spe;
+	u16 type;
+	bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EIO;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+
+	if (common) {
+		if (!atomic_read(&bp->eq_spq_left)) {
+			BNX2X_ERR("BUG! EQ ring full!\n");
+			spin_unlock_bh(&bp->spq_lock);
+			bnx2x_panic();
+			return -EBUSY;
+		}
+	} else if (!atomic_read(&bp->cq_spq_left)) {
+		BNX2X_ERR("BUG! SPQ ring full!\n");
+		spin_unlock_bh(&bp->spq_lock);
+		bnx2x_panic();
+		return -EBUSY;
+	}
+
+	spe = bnx2x_sp_get_next(bp);
+
+	/* CID needs the port number to be encoded in it */
+	spe->hdr.conn_and_cmd_data =
+			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+				    HW_CID(bp, cid));
+
+	type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+
+	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+		 SPE_HDR_FUNCTION_ID);
+
+	spe->hdr.type = cpu_to_le16(type);
+
+	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
+	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
+
+	/*
+	 * It's ok if the actual decrement is issued towards the memory
+	 * somewhere between the spin_lock and spin_unlock. Thus no
+	 * further explicit memory barrier is needed.
+	 */
+	if (common)
+		atomic_dec(&bp->eq_spq_left);
+	else
+		atomic_dec(&bp->cq_spq_left);
+
+	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+	   "SPQE[%x] (%x:%x)  (cmd, common?) (%d,%d)  hw_cid %x  data (%x:%x) "
+	   "type(0x%x) left (CQ, EQ) (%x,%x)\n",
+	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+	   (u32)(U64_LO(bp->spq_mapping) +
+	   (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
+	   HW_CID(bp, cid), data_hi, data_lo, type,
+	   atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
+
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+	return 0;
+}
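+
+/*
+ * Sketch of a caller (hypothetical cid): post an EMPTY ramrod on an
+ * ethernet connection; its completion is delivered back through
+ * bnx2x_sp_event() above:
+ *
+ *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, cid,
+ *			   0, 0, ETH_CONNECTION_TYPE);
+ */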
+
+/* acquire split MCP access lock register */
+static int bnx2x_acquire_alr(struct bnx2x *bp)
+{
+	u32 j, val;
+	int rc = 0;
+
+	might_sleep();
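+	/* Writing bit 31 of the lock register requests the lock; the MCP
+	 * grants it when the same bit reads back as set.
+	 */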
+	for (j = 0; j < 1000; j++) {
+		val = (1UL << 31);
+		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
+		if (val & (1L << 31))
+			break;
+
+		msleep(5);
+	}
+	if (!(val & (1L << 31))) {
+		BNX2X_ERR("Cannot acquire MCP access lock register\n");
+		rc = -EBUSY;
+	}
+
+	return rc;
+}
+
+/* release split MCP access lock register */
+static void bnx2x_release_alr(struct bnx2x *bp)
+{
+	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
+}
+
+#define BNX2X_DEF_SB_ATT_IDX	0x0001
+#define BNX2X_DEF_SB_IDX	0x0002
+
+static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
+{
+	struct host_sp_status_block *def_sb = bp->def_status_blk;
+	u16 rc = 0;
+
+	barrier(); /* status block is written to by the chip */
+	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
+		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
+		rc |= BNX2X_DEF_SB_ATT_IDX;
+	}
+
+	if (bp->def_idx != def_sb->sp_sb.running_index) {
+		bp->def_idx = def_sb->sp_sb.running_index;
+		rc |= BNX2X_DEF_SB_IDX;
+	}
+
+	/* Do not reorder: index reads must complete before handling */
+	barrier();
+	return rc;
+}
+
+/*
+ * slow path service functions
+ */
+
+static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
+{
+	int port = BP_PORT(bp);
+	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
+	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
+				       NIG_REG_MASK_INTERRUPT_PORT0;
+	u32 aeu_mask;
+	u32 nig_mask = 0;
+	u32 reg_addr;
+
+	if (bp->attn_state & asserted)
+		BNX2X_ERR("IGU ERROR\n");
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, aeu_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
+	   aeu_mask, asserted);
+	aeu_mask &= ~(asserted & 0x3ff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, aeu_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+	bp->attn_state |= asserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+
+	if (asserted & ATTN_HARD_WIRED_MASK) {
+		if (asserted & ATTN_NIG_FOR_FUNC) {
+
+			bnx2x_acquire_phy_lock(bp);
+
+			/* save nig interrupt mask */
+			nig_mask = REG_RD(bp, nig_int_mask_addr);
+
+			/* If nig_mask is not set, no need to call the update
+			 * function.
+			 */
+			if (nig_mask) {
+				REG_WR(bp, nig_int_mask_addr, 0);
+
+				bnx2x_link_attn(bp);
+			}
+
+			/* handle unicore attn? */
+		}
+		if (asserted & ATTN_SW_TIMER_4_FUNC)
+			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
+
+		if (asserted & GPIO_2_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
+
+		if (asserted & GPIO_3_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
+
+		if (asserted & GPIO_4_FUNC)
+			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
+
+		if (port == 0) {
+			if (asserted & ATTN_GENERAL_ATTN_1) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_2) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_3) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
+			}
+		} else {
+			if (asserted & ATTN_GENERAL_ATTN_4) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_5) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
+			}
+			if (asserted & ATTN_GENERAL_ATTN_6) {
+				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
+				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
+			}
+		}
+
+	} /* if hardwired */
+
+	if (bp->common.int_block == INT_BLOCK_HC)
+		reg_addr = (HC_REG_COMMAND_REG + port*32 +
+			    COMMAND_REG_ATTN_BITS_SET);
+	else
+		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
+
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
+	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+	REG_WR(bp, reg_addr, asserted);
+
+	/* now set back the mask */
+	if (asserted & ATTN_NIG_FOR_FUNC) {
+		REG_WR(bp, nig_int_mask_addr, nig_mask);
+		bnx2x_release_phy_lock(bp);
+	}
+}
+
+static inline void bnx2x_fan_failure(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 ext_phy_config;
+	/* mark the failure */
+	ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+
+	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
+	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
+	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
+		 ext_phy_config);
+
+	/* log the failure */
+	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+	       " the driver to shut down the card to prevent permanent"
+	       " damage.  Please contact OEM Support for assistance\n");
+}
+
+static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
+{
+	int port = BP_PORT(bp);
+	int reg_offset;
+	u32 val;
+
+	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+
+	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("SPIO5 hw attention\n");
+
+		/* Fan failure attention */
+		bnx2x_hw_reset_phy(&bp->link_params);
+		bnx2x_fan_failure(bp);
+	}
+
+	if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
+		bnx2x_acquire_phy_lock(bp);
+		bnx2x_handle_module_detect_int(&bp->link_params);
+		bnx2x_release_phy_lock(bp);
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_0) {
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
+		bnx2x_panic();
+	}
+}
+
+static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
+
+		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
+		BNX2X_ERR("DB hw attention 0x%x\n", val);
+		/* DORQ discard attention */
+		if (val & 0x2)
+			BNX2X_ERR("FATAL error from DORQ\n");
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_1) {
+
+		int port = BP_PORT(bp);
+		int reg_offset;
+
+		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
+				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
+		bnx2x_panic();
+	}
+}
+
+static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
+
+		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
+		BNX2X_ERR("CFC hw attention 0x%x\n", val);
+		/* CFC error attention */
+		if (val & 0x2)
+			BNX2X_ERR("FATAL error from CFC\n");
+	}
+
+	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
+		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
+		BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
+		/* RQ_USDMDP_FIFO_OVERFLOW */
+		if (val & 0x18000)
+			BNX2X_ERR("FATAL error from PXP\n");
+
+		if (!CHIP_IS_E1x(bp)) {
+			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
+			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
+		}
+	}
+
+	if (attn & HW_INTERRUT_ASSERT_SET_2) {
+
+		int port = BP_PORT(bp);
+		int reg_offset;
+
+		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
+				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
+
+		val = REG_RD(bp, reg_offset);
+		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
+		REG_WR(bp, reg_offset, val);
+
+		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
+			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
+		bnx2x_panic();
+	}
+}
+
+static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+
+	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
+
+		if (attn & BNX2X_PMF_LINK_ASSERT) {
+			int func = BP_FUNC(bp);
+
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
+					func_mf_config[BP_ABS_FUNC(bp)].config);
+			val = SHMEM_RD(bp,
+				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
+			if (val & DRV_STATUS_DCC_EVENT_MASK)
+				bnx2x_dcc_event(bp,
+					    (val & DRV_STATUS_DCC_EVENT_MASK));
+
+			if (val & DRV_STATUS_SET_MF_BW)
+				bnx2x_set_mf_bw(bp);
+
+			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
+				bnx2x_pmf_update(bp);
+
+			if (bp->port.pmf &&
+			    (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
+				bp->dcbx_enabled > 0)
+				/* start dcbx state machine */
+				bnx2x_dcbx_set_params(bp,
+					BNX2X_DCBX_STATE_NEG_RECEIVED);
+			if (bp->link_vars.periodic_flags &
+			    PERIODIC_FLAGS_LINK_EVENT) {
+				/*  sync with link */
+				bnx2x_acquire_phy_lock(bp);
+				bp->link_vars.periodic_flags &=
+					~PERIODIC_FLAGS_LINK_EVENT;
+				bnx2x_release_phy_lock(bp);
+				if (IS_MF(bp))
+					bnx2x_link_sync_notify(bp);
+				bnx2x_link_report(bp);
+			}
+			/* Always call it here: bnx2x_link_report() will
+			 * prevent the link indication duplication.
+			 */
+			bnx2x__link_status_update(bp);
+		} else if (attn & BNX2X_MC_ASSERT_BITS) {
+
+			BNX2X_ERR("MC assert!\n");
+			bnx2x_mc_assert(bp);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
+			bnx2x_panic();
+
+		} else if (attn & BNX2X_MCP_ASSERT) {
+
+			BNX2X_ERR("MCP assert!\n");
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
+			bnx2x_fw_dump(bp);
+
+		} else
+			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
+	}
+
+	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
+		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
+		if (attn & BNX2X_GRC_TIMEOUT) {
+			val = CHIP_IS_E1(bp) ? 0 :
+					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
+			BNX2X_ERR("GRC time-out 0x%08x\n", val);
+		}
+		if (attn & BNX2X_GRC_RSV) {
+			val = CHIP_IS_E1(bp) ? 0 :
+					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
+			BNX2X_ERR("GRC reserved 0x%08x\n", val);
+		}
+		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
+	}
+}
+
+/*
+ * Bits map:
+ * 0-7   - Engine0 load counter.
+ * 8-15  - Engine1 load counter.
+ * 16    - Engine0 RESET_IN_PROGRESS bit.
+ * 17    - Engine1 RESET_IN_PROGRESS bit.
+ * 18    - Engine0 ONE_IS_LOADED. Set when there is at least one active
+ *         function on the engine.
+ * 19    - Engine1 ONE_IS_LOADED.
+ * 20    - Chip reset flow bit. When set, a non-leader must wait for the
+ *         leaders on both engines to complete (i.e. check both
+ *         RESET_IN_PROGRESS bits, not just the one belonging to its engine).
+ */
+#define BNX2X_RECOVERY_GLOB_REG		MISC_REG_GENERIC_POR_1
+
+#define BNX2X_PATH0_LOAD_CNT_MASK	0x000000ff
+#define BNX2X_PATH0_LOAD_CNT_SHIFT	0
+#define BNX2X_PATH1_LOAD_CNT_MASK	0x0000ff00
+#define BNX2X_PATH1_LOAD_CNT_SHIFT	8
+#define BNX2X_PATH0_RST_IN_PROG_BIT	0x00010000
+#define BNX2X_PATH1_RST_IN_PROG_BIT	0x00020000
+#define BNX2X_GLOBAL_RESET_BIT		0x00040000
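+
+/* Worked example (illustrative value): a register value of 0x00050201
+ * decodes as engine0 load counter 1 (bits 0-7), engine1 load counter 2
+ * (bits 8-15), engine0 RESET_IN_PROGRESS set (bit 16) and engine0
+ * ONE_IS_LOADED set (bit 18).
+ */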
+
+/*
+ * Set the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+void bnx2x_set_reset_global(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * Clear the GLOBAL_RESET bit.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
+	barrier();
+	mmiowb();
+}
+
+/*
+ * Checks the GLOBAL_RESET bit.
+ *
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+	return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
+}
+
+/*
+ * Clear RESET_IN_PROGRESS bit for the current engine.
+ *
+ * Should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 bit = BP_PATH(bp) ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+	/* Clear the bit */
+	val &= ~bit;
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * Set RESET_IN_PROGRESS for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 bit = BP_PATH(bp) ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+	/* Set the bit */
+	val |= bit;
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * Checks the RESET_IN_PROGRESS bit for the given engine.
+ * should be run under rtnl lock
+ */
+bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
+{
+	u32 val	= REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 bit = engine ?
+		BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+
+	/* return false if bit is set */
+	return (val & bit) ? false : true;
+}
+
+/*
+ * Increment the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK;
+	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	/* get the current counter value */
+	val1 = (val & mask) >> shift;
+
+	/* increment... */
+	val1++;
+
+	/* clear the old value */
+	val &= ~mask;
+
+	/* set the new one */
+	val |= ((val1 << shift) & mask);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/**
+ * bnx2x_dec_load_cnt - decrement the load counter
+ *
+ * @bp:		driver handle
+ *
+ * Should be run under rtnl lock.
+ * Decrements the load counter for the current engine. Returns
+ * the new counter value.
+ */
+u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK;
+	u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT;
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	/* get the current counter value */
+	val1 = (val & mask) >> shift;
+
+	/* decrement... */
+	val1--;
+
+	/* clear the old value */
+	val &= ~mask;
+
+	/* set the new one */
+	val |= ((val1 << shift) & mask);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
+	barrier();
+	mmiowb();
+
+	return val1;
+}
+
+/*
+ * Read the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+{
+	u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK);
+	u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
+			     BNX2X_PATH0_LOAD_CNT_SHIFT);
+	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+	DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+
+	val = (val & mask) >> shift;
+
+	DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+
+	return val;
+}
+
+/*
+ * Reset the load counter for the current engine.
+ *
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+	u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
+			     BNX2X_PATH0_LOAD_CNT_MASK);
+
+	REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+	if (idx)
+		pr_cont(", ");
+	pr_cont("%s", blk);
+}
+
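+/* Each checker below walks one AEU "after invert" signal word bit by
+ * bit: for every set bit it names the block that raised the parity
+ * error (when print is true) and clears the bit from its local copy,
+ * so the loop terminates once sig reaches zero.
+ */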
+static inline int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
+						  bool print)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "BRB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "PARSER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "TSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++,
+							  "SEARCHER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "TCM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "TSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "XPB");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
+						  bool *global, bool print)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "PBF");
+				break;
+			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "QM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "TM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "XSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "XCM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "XSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++,
+							  "DOORBELLQ");
+				break;
+			case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "NIG");
+				break;
+			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++,
+							  "VAUX PCI CORE");
+				*global = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "DEBUG");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "USDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "UCM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "USEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "UPB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "CSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "CCM");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
+						  bool print)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "CSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "PXP");
+				break;
+			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++,
+					"PXPPCICLOCKCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "CFC");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "CDU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "DMAE");
+				break;
+			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "IGU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "MISC");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
+						  bool *global, bool print)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+				if (print)
+					_print_next_block(par_num++, "MCP ROM");
+				*global = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+				if (print)
+					_print_next_block(par_num++,
+							  "MCP UMP RX");
+				*global = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+				if (print)
+					_print_next_block(par_num++,
+							  "MCP UMP TX");
+				*global = true;
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+				if (print)
+					_print_next_block(par_num++,
+							  "MCP SCPAD");
+				*global = true;
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
+						  bool print)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "PGLUE_B");
+				break;
+			case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
+				if (print)
+					_print_next_block(par_num++, "ATC");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
+				     u32 *sig)
+{
+	if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
+	    (sig[1] & HW_PRTY_ASSERT_SET_1) ||
+	    (sig[2] & HW_PRTY_ASSERT_SET_2) ||
+	    (sig[3] & HW_PRTY_ASSERT_SET_3) ||
+	    (sig[4] & HW_PRTY_ASSERT_SET_4)) {
+		int par_num = 0;
+		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+			"[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
+			"[4]:0x%08x\n",
+			  sig[0] & HW_PRTY_ASSERT_SET_0,
+			  sig[1] & HW_PRTY_ASSERT_SET_1,
+			  sig[2] & HW_PRTY_ASSERT_SET_2,
+			  sig[3] & HW_PRTY_ASSERT_SET_3,
+			  sig[4] & HW_PRTY_ASSERT_SET_4);
+		if (print)
+			netdev_err(bp->dev,
+				   "Parity errors detected in blocks: ");
+		par_num = bnx2x_check_blocks_with_parity0(
+			sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
+		par_num = bnx2x_check_blocks_with_parity1(
+			sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
+		par_num = bnx2x_check_blocks_with_parity2(
+			sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
+		par_num = bnx2x_check_blocks_with_parity3(
+			sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
+		par_num = bnx2x_check_blocks_with_parity4(
+			sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
+
+		if (print)
+			pr_cont("\n");
+
+		return true;
+	} else
+		return false;
+}
+
+/**
+ * bnx2x_chk_parity_attn - checks for parity attentions.
+ *
+ * @bp:		driver handle
+ * @global:	true if there was a global attention
+ * @print:	show parity attention in syslog
+ */
+bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
+{
+	struct attn_route attn = { {0} };
+	int port = BP_PORT(bp);
+
+	attn.sig[0] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+			     port*4);
+	attn.sig[1] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+			     port*4);
+	attn.sig[2] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+			     port*4);
+	attn.sig[3] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+			     port*4);
+
+	if (!CHIP_IS_E1x(bp))
+		attn.sig[4] = REG_RD(bp,
+			MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
+				     port*4);
+
+	return bnx2x_parity_attn(bp, global, print, attn.sig);
+}
+
+static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
+{
+	u32 val;
+	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
+
+		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
+		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "ADDRESS_ERROR\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "INCORRECT_RCV_BEHAVIOR\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "WAS_ERROR_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "VF_LENGTH_VIOLATION_ATTN\n");
+		if (val &
+		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
+		if (val &
+		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "TCPL_ERROR_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "TCPL_IN_TWO_RCBS_ATTN\n");
+		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
+			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
+				  "CSSNOOP_FIFO_OVERFLOW\n");
+	}
+	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
+		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
+		BNX2X_ERR("ATC hw attention 0x%x\n", val);
+		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG"
+				  "_ATC_TCPL_TO_NOT_PEND\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+				  "ATC_GPA_MULTIPLE_HITS\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+				  "ATC_RCPL_TO_EMPTY_CNT\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
+		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
+			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
+				  "ATC_IREQ_LESS_THAN_STU\n");
+	}
+
+	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
+		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
+		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
+		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
+	}
+
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+	struct attn_route attn, *group_mask;
+	int port = BP_PORT(bp);
+	int index;
+	u32 reg_addr;
+	u32 val;
+	u32 aeu_mask;
+	bool global = false;
+
+	/* need to take HW lock because MCP or other port might also
+	   try to handle this event */
+	bnx2x_acquire_alr(bp);
+
+	if (bnx2x_chk_parity_attn(bp, &global, true)) {
+#ifndef BNX2X_STOP_ON_ERROR
+		bp->recovery_state = BNX2X_RECOVERY_INIT;
+		schedule_delayed_work(&bp->sp_rtnl_task, 0);
+		/* Disable HW interrupts */
+		bnx2x_int_disable(bp);
+		/* In case of parity errors don't handle attentions so that
+		 * the other function would also "see" the parity errors.
+		 */
+#else
+		bnx2x_panic();
+#endif
+		bnx2x_release_alr(bp);
+		return;
+	}
+
+	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
+	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
+	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
+	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
+	if (!CHIP_IS_E1x(bp))
+		attn.sig[4] =
+		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
+	else
+		attn.sig[4] = 0;
+
+	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
+	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
+
+	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+		if (deasserted & (1 << index)) {
+			group_mask = &bp->attn_group[index];
+
+			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
+					 "%08x %08x %08x\n",
+			   index,
+			   group_mask->sig[0], group_mask->sig[1],
+			   group_mask->sig[2], group_mask->sig[3],
+			   group_mask->sig[4]);
+
+			bnx2x_attn_int_deasserted4(bp,
+					attn.sig[4] & group_mask->sig[4]);
+			bnx2x_attn_int_deasserted3(bp,
+					attn.sig[3] & group_mask->sig[3]);
+			bnx2x_attn_int_deasserted1(bp,
+					attn.sig[1] & group_mask->sig[1]);
+			bnx2x_attn_int_deasserted2(bp,
+					attn.sig[2] & group_mask->sig[2]);
+			bnx2x_attn_int_deasserted0(bp,
+					attn.sig[0] & group_mask->sig[0]);
+		}
+	}
+
+	bnx2x_release_alr(bp);
+
+	if (bp->common.int_block == INT_BLOCK_HC)
+		reg_addr = (HC_REG_COMMAND_REG + port*32 +
+			    COMMAND_REG_ATTN_BITS_CLR);
+	else
+		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
+
+	val = ~deasserted;
+	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
+	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
+	REG_WR(bp, reg_addr, val);
+
+	if (~bp->attn_state & deasserted)
+		BNX2X_ERR("IGU ERROR\n");
+
+	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			  MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+	aeu_mask = REG_RD(bp, reg_addr);
+
+	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
+	   aeu_mask, deasserted);
+	aeu_mask |= (deasserted & 0x3ff);
+	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
+
+	REG_WR(bp, reg_addr, aeu_mask);
+	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
+
+	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
+	bp->attn_state &= ~deasserted;
+	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
+}
+
+static void bnx2x_attn_int(struct bnx2x *bp)
+{
+	/* read local copy of bits */
+	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits);
+	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
+								attn_bits_ack);
+	u32 attn_state = bp->attn_state;
+
+	/* look for changed bits */
+	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
+	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
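+	/* Worked example (illustrative values): with attn_bits 0x5,
+	 * attn_ack 0x1 and attn_state 0x1, asserted = 0x5 & ~0x1 & ~0x1
+	 * = 0x4 (a newly raised bit) and deasserted = ~0x5 & 0x1 & 0x1
+	 * = 0x0 (nothing was lowered).
+	 */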
+
+	DP(NETIF_MSG_HW,
+	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
+	   attn_bits, attn_ack, asserted, deasserted);
+
+	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
+		BNX2X_ERR("BAD attention state\n");
+
+	/* handle bits that were raised */
+	if (asserted)
+		bnx2x_attn_int_asserted(bp, asserted);
+
+	if (deasserted)
+		bnx2x_attn_int_deasserted(bp, deasserted);
+}
+
+void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
+		      u16 index, u8 op, u8 update)
+{
+	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
+
+	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
+			     igu_addr);
+}
+
+static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
+{
+	/* No memory barriers */
+	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
+	mmiowb(); /* keep prod updates ordered */
+}
+
+#ifdef BCM_CNIC
+static int  bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
+				      union event_ring_elem *elem)
+{
+	u8 err = elem->message.error;
+
+	if (!bp->cnic_eth_dev.starting_cid  ||
+	    (cid < bp->cnic_eth_dev.starting_cid &&
+	    cid != bp->cnic_eth_dev.iscsi_l2_cid))
+		return 1;
+
+	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
+
+	if (unlikely(err)) {
+
+		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
+			  cid);
+		bnx2x_panic_dump(bp);
+	}
+	bnx2x_cnic_cfc_comp(bp, cid, err);
+	return 0;
+}
+#endif
+
+static inline void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
+{
+	struct bnx2x_mcast_ramrod_params rparam;
+	int rc;
+
+	memset(&rparam, 0, sizeof(rparam));
+
+	rparam.mcast_obj = &bp->mcast_obj;
+
+	netif_addr_lock_bh(bp->dev);
+
+	/* Clear pending state for the last command */
+	bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
+
+	/* If there are pending mcast commands - send them */
+	if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+		if (rc < 0)
+			BNX2X_ERR("Failed to send pending mcast commands: %d\n",
+				  rc);
+	}
+
+	netif_addr_unlock_bh(bp->dev);
+}
+
+static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
+						   union event_ring_elem *elem)
+{
+	unsigned long ramrod_flags = 0;
+	int rc = 0;
+	u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
+	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+	/* Always push next commands out, don't wait here */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+
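+	/* The echo field packs the SW CID in its low bits and the pending
+	 * command type above BNX2X_SWCID_SHIFT (cf. the cid extraction
+	 * above), so the shift below recovers the pending command.
+	 */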
+	switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
+	case BNX2X_FILTER_MAC_PENDING:
+#ifdef BCM_CNIC
+		if (cid == BNX2X_ISCSI_ETH_CID)
+			vlan_mac_obj = &bp->iscsi_l2_mac_obj;
+		else
+#endif
+			vlan_mac_obj = &bp->fp[cid].mac_obj;
+
+		break;
+
+	case BNX2X_FILTER_MCAST_PENDING:
+		/* This is only relevant for 57710 where multicast MACs are
+		 * configured as unicast MACs using the same ramrod.
+		 */
+		bnx2x_handle_mcast_eqe(bp);
+		return;
+	default:
+		BNX2X_ERR("Unsupported classification command: %d\n",
+			  elem->message.data.eth_event.echo);
+		return;
+	}
+
+	rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
+
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
+	else if (rc > 0)
+		DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
+}
+
+#ifdef BCM_CNIC
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
+#endif
+
+static inline void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
+{
+	netif_addr_lock_bh(bp->dev);
+
+	clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+	/* Send rx_mode command again if was requested */
+	if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
+		bnx2x_set_storm_rx_mode(bp);
+#ifdef BCM_CNIC
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, true);
+	else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+				    &bp->sp_state))
+		bnx2x_set_iscsi_eth_rx_mode(bp, false);
+#endif
+
+	netif_addr_unlock_bh(bp->dev);
+}
+
+static inline struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
+	struct bnx2x *bp, u32 cid)
+{
+	DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
+#ifdef BCM_CNIC
+	if (cid == BNX2X_FCOE_ETH_CID)
+		return &bnx2x_fcoe(bp, q_obj);
+	else
+#endif
+		return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj);
+}
+
+static void bnx2x_eq_int(struct bnx2x *bp)
+{
+	u16 hw_cons, sw_cons, sw_prod;
+	union event_ring_elem *elem;
+	u32 cid;
+	u8 opcode;
+	int spqe_cnt = 0;
+	struct bnx2x_queue_sp_obj *q_obj;
+	struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
+	struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
+
+	hw_cons = le16_to_cpu(*bp->eq_cons_sb);
+
+	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
+	 * When we get the next-page element we need to adjust hw_cons so the
+	 * loop condition below will be met. The next-page element is the size
+	 * of a regular element, hence the increment by 1.
+	 */
+	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
+		hw_cons++;
+
+	/* This function may never run in parallel with itself for a
+	 * specific bp, thus there is no need for a "paired" read memory
+	 * barrier here.
+	 */
+	sw_cons = bp->eq_cons;
+	sw_prod = bp->eq_prod;
+
+	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->eq_spq_left %x\n",
+			hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
+
+	for (; sw_cons != hw_cons;
+	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
+
+		elem = &bp->eq_ring[EQ_DESC(sw_cons)];
+
+		cid = SW_CID(elem->message.data.cfc_del_event.cid);
+		opcode = elem->message.opcode;
+
+		/* handle eq element */
+		switch (opcode) {
+		case EVENT_RING_OPCODE_STAT_QUERY:
+			DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+			   bp->stats_comp++);
+			/* nothing to do with stats comp */
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_CFC_DEL:
+			/* handle according to cid range */
+			/*
+			 * we may want to verify here that the bp state is
+			 * HALTING
+			 */
+			DP(BNX2X_MSG_SP,
+			   "got delete ramrod for MULTI[%d]\n", cid);
+#ifdef BCM_CNIC
+			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
+				goto next_spqe;
+#endif
+			q_obj = bnx2x_cid_to_q_obj(bp, cid);
+
+			if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
+				break;
+
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_STOP_TRAFFIC:
+			DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+			if (f_obj->complete_cmd(bp, f_obj,
+						BNX2X_F_CMD_TX_STOP))
+				break;
+			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_START_TRAFFIC:
+			DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+			if (f_obj->complete_cmd(bp, f_obj,
+						BNX2X_F_CMD_TX_START))
+				break;
+			bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
+			goto next_spqe;
+		case EVENT_RING_OPCODE_FUNCTION_START:
+			DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
+				break;
+
+			goto next_spqe;
+
+		case EVENT_RING_OPCODE_FUNCTION_STOP:
+			DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+			if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
+				break;
+
+			goto next_spqe;
+		}
+
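+		/* The BNX2X_STATE_* values occupy high bits while the event
+		 * ring opcodes are small constants below them, so OR-ing the
+		 * two yields a unique (opcode, state) key for the case
+		 * labels below.
+		 */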
+		switch (opcode | bp->state) {
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
+		      BNX2X_STATE_OPENING_WAIT4_PORT):
+			cid = elem->message.data.eth_event.echo &
+				BNX2X_SWCID_MASK;
+			DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
+			   cid);
+			rss_raw->clear_pending(rss_raw);
+			break;
+
+		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_SET_MAC |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
+			bnx2x_handle_classification_eqe(bp, elem);
+			break;
+
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_MULTICAST_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(BNX2X_MSG_SP, "got mcast ramrod\n");
+			bnx2x_handle_mcast_eqe(bp);
+			break;
+
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_OPEN):
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_DIAG):
+		case (EVENT_RING_OPCODE_FILTERS_RULES |
+		      BNX2X_STATE_CLOSING_WAIT4_HALT):
+			DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
+			bnx2x_handle_rx_mode_eqe(bp);
+			break;
+		default:
+			/* unknown event log error and continue */
+			BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
+				  elem->message.opcode, bp->state);
+		}
+next_spqe:
+		spqe_cnt++;
+	} /* for */
+
+	smp_mb__before_atomic_inc();
+	atomic_add(spqe_cnt, &bp->eq_spq_left);
+
+	bp->eq_cons = sw_cons;
+	bp->eq_prod = sw_prod;
+	/* Make sure the above memory writes are issued before the producer
+	   update */
+	smp_wmb();
+
+	/* update producer */
+	bnx2x_update_eq_prod(bp, bp->eq_prod);
+}
+
+static void bnx2x_sp_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
+	u16 status;
+
+	status = bnx2x_update_dsb_idx(bp);
+/*	if (status == 0)				     */
+/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
+
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+
+	/* HW attentions */
+	if (status & BNX2X_DEF_SB_ATT_IDX) {
+		bnx2x_attn_int(bp);
+		status &= ~BNX2X_DEF_SB_ATT_IDX;
+	}
+
+	/* SP events: STAT_QUERY and others */
+	if (status & BNX2X_DEF_SB_IDX) {
+#ifdef BCM_CNIC
+		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
+
+		if ((!NO_FCOE(bp)) &&
+			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+			/*
+			 * Prevent local bottom-halves from running as
+			 * we are going to change the local NAPI list.
+			 */
+			local_bh_disable();
+			napi_schedule(&bnx2x_fcoe(bp, napi));
+			local_bh_enable();
+		}
+#endif
+		/* Handle EQ completions */
+		bnx2x_eq_int(bp);
+
+		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
+			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
+
+		status &= ~BNX2X_DEF_SB_IDX;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
+
+	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
+	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
+}
+
+irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
+{
+	struct net_device *dev = dev_instance;
+	struct bnx2x *bp = netdev_priv(dev);
+
+	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
+		     IGU_INT_DISABLE, 0);
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return IRQ_HANDLED;
+#endif
+
+#ifdef BCM_CNIC
+	{
+		struct cnic_ops *c_ops;
+
+		rcu_read_lock();
+		c_ops = rcu_dereference(bp->cnic_ops);
+		if (c_ops)
+			c_ops->cnic_handler(bp->cnic_data, NULL);
+		rcu_read_unlock();
+	}
+#endif
+	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
+
+	return IRQ_HANDLED;
+}
+
+/* end of slow path */
+
+void bnx2x_drv_pulse(struct bnx2x *bp)
+{
+	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
+		 bp->fw_drv_pulse_wr_seq);
+}
+
+static void bnx2x_timer(unsigned long data)
+{
+	u8 cos;
+	struct bnx2x *bp = (struct bnx2x *) data;
+
+	if (!netif_running(bp->dev))
+		return;
+
+	if (poll) {
+		struct bnx2x_fastpath *fp = &bp->fp[0];
+
+		for_each_cos_in_tx_queue(fp, cos)
+			bnx2x_tx_int(bp, &fp->txdata[cos]);
+		bnx2x_rx_int(fp, 1000);
+	}
+
+	if (!BP_NOMCP(bp)) {
+		int mb_idx = BP_FW_MB_IDX(bp);
+		u32 drv_pulse;
+		u32 mcp_pulse;
+
+		++bp->fw_drv_pulse_wr_seq;
+		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
+		/* TBD - add SYSTEM_TIME */
+		drv_pulse = bp->fw_drv_pulse_wr_seq;
+		bnx2x_drv_pulse(bp);
+
+		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
+			     MCP_PULSE_SEQ_MASK);
+		/* The delta between driver pulse and mcp response
+		 * should be 1 (before mcp response) or 0 (after mcp response)
+		 */
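+		/* e.g. drv_pulse 0x005 is fine against mcp_pulse 0x005 or
+		 * 0x004 (mcp response one behind); any other value means a
+		 * lost heartbeat, modulo the sequence mask wrap-around.
+		 */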
+		if ((drv_pulse != mcp_pulse) &&
+		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
+			/* someone lost a heartbeat... */
+			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
+				  drv_pulse, mcp_pulse);
+		}
+	}
+
+	if (bp->state == BNX2X_STATE_OPEN)
+		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
+
+	mod_timer(&bp->timer, jiffies + bp->current_interval);
+}
+
+/* end of Statistics */
+
+/* nic init */
+
+/*
+ * nic init service functions
+ */
+
+static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
+{
+	u32 i;
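+	/* Use dword-wide writes when both the address and the length are
+	 * dword-aligned; otherwise fall back to byte-wide writes.
+	 */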
+	if (!(len%4) && !(addr%4))
+		for (i = 0; i < len; i += 4)
+			REG_WR(bp, addr + i, fill);
+	else
+		for (i = 0; i < len; i++)
+			REG_WR8(bp, addr + i, fill);
+}
+
+/* helper: writes FP SP data to FW - data_size in dwords */
+static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
+				       int fw_sb_id,
+				       u32 *sb_data_p,
+				       u32 data_size)
+{
+	int index;
+	for (index = 0; index < data_size; index++)
+		REG_WR(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
+			sizeof(u32)*index,
+			*(sb_data_p + index));
+}
+
+static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
+{
+	u32 *sb_data_p;
+	u32 data_size = 0;
+	struct hc_status_block_data_e2 sb_data_e2;
+	struct hc_status_block_data_e1x sb_data_e1x;
+
+	/* disable the function first */
+	if (!CHIP_IS_E1x(bp)) {
+		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+		sb_data_e2.common.state = SB_DISABLED;
+		sb_data_e2.common.p_func.vf_valid = false;
+		sb_data_p = (u32 *)&sb_data_e2;
+		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+	} else {
+		memset(&sb_data_e1x, 0,
+		       sizeof(struct hc_status_block_data_e1x));
+		sb_data_e1x.common.state = SB_DISABLED;
+		sb_data_e1x.common.p_func.vf_valid = false;
+		sb_data_p = (u32 *)&sb_data_e1x;
+		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+	}
+	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
+			CSTORM_STATUS_BLOCK_SIZE);
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
+			CSTORM_SYNC_BLOCK_SIZE);
+}
+
+/* helper:  writes SP SB data to FW */
+static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
+		struct hc_sp_status_block_data *sp_sb_data)
+{
+	int func = BP_FUNC(bp);
+	int i;
+	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
+		REG_WR(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
+			i*sizeof(u32),
+			*((u32 *)sp_sb_data + i));
+}
+
+static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
+{
+	int func = BP_FUNC(bp);
+	struct hc_sp_status_block_data sp_sb_data;
+	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+	sp_sb_data.state = SB_DISABLED;
+	sp_sb_data.p_func.vf_valid = false;
+
+	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
+			CSTORM_SP_STATUS_BLOCK_SIZE);
+	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
+			CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
+			CSTORM_SP_SYNC_BLOCK_SIZE);
+}
+
+static inline
+void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
+					   int igu_sb_id, int igu_seg_id)
+{
+	hc_sm->igu_sb_id = igu_sb_id;
+	hc_sm->igu_seg_id = igu_seg_id;
+	hc_sm->timer_value = 0xFF;
+	hc_sm->time_to_expire = 0xFFFFFFFF;
+}
+
+static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
+			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
+{
+	int igu_seg_id;
+
+	struct hc_status_block_data_e2 sb_data_e2;
+	struct hc_status_block_data_e1x sb_data_e1x;
+	struct hc_status_block_sm  *hc_sm_p;
+	int data_size;
+	u32 *sb_data_p;
+
+	if (CHIP_INT_MODE_IS_BC(bp))
+		igu_seg_id = HC_SEG_ACCESS_NORM;
+	else
+		igu_seg_id = IGU_SEG_ACCESS_NORM;
+
+	bnx2x_zero_fp_sb(bp, fw_sb_id);
+
+	if (!CHIP_IS_E1x(bp)) {
+		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
+		sb_data_e2.common.state = SB_ENABLED;
+		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
+		sb_data_e2.common.p_func.vf_id = vfid;
+		sb_data_e2.common.p_func.vf_valid = vf_valid;
+		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
+		sb_data_e2.common.same_igu_sb_1b = true;
+		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
+		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
+		hc_sm_p = sb_data_e2.common.state_machine;
+		sb_data_p = (u32 *)&sb_data_e2;
+		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
+	} else {
+		memset(&sb_data_e1x, 0,
+		       sizeof(struct hc_status_block_data_e1x));
+		sb_data_e1x.common.state = SB_ENABLED;
+		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
+		sb_data_e1x.common.p_func.vf_id = 0xff;
+		sb_data_e1x.common.p_func.vf_valid = false;
+		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
+		sb_data_e1x.common.same_igu_sb_1b = true;
+		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
+		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
+		hc_sm_p = sb_data_e1x.common.state_machine;
+		sb_data_p = (u32 *)&sb_data_e1x;
+		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
+	}
+
+	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
+				       igu_sb_id, igu_seg_id);
+	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
+				       igu_sb_id, igu_seg_id);
+
+	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
+
+	/* write indices to HW */
+	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
+}
+
+static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
+				     u16 tx_usec, u16 rx_usec)
+{
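+	/* Program the RX CQ index and one TX CQ index per traffic class
+	 * (COS0-COS2) with the requested coalescing timeouts.
+	 */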
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
+				    false, rx_usec);
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+				       HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
+				       tx_usec);
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+				       HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
+				       tx_usec);
+	bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
+				       HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
+				       tx_usec);
+}
+
+static void bnx2x_init_def_sb(struct bnx2x *bp)
+{
+	struct host_sp_status_block *def_sb = bp->def_status_blk;
+	dma_addr_t mapping = bp->def_status_blk_mapping;
+	int igu_sp_sb_index;
+	int igu_seg_id;
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int reg_offset;
+	u64 section;
+	int index;
+	struct hc_sp_status_block_data sp_sb_data;
+	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
+
+	if (CHIP_INT_MODE_IS_BC(bp)) {
+		igu_sp_sb_index = DEF_SB_IGU_ID;
+		igu_seg_id = HC_SEG_ACCESS_DEF;
+	} else {
+		igu_sp_sb_index = bp->igu_dsb_id;
+		igu_seg_id = IGU_SEG_ACCESS_DEF;
+	}
+
+	/* ATTN */
+	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+					    atten_status_block);
+	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
+
+	bp->attn_state = 0;
+
+	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
+		int sindex;
+		/* take care of sig[0]..sig[4] */
+		for (sindex = 0; sindex < 4; sindex++)
+			bp->attn_group[index].sig[sindex] =
+			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
+
+		if (!CHIP_IS_E1x(bp))
+			/*
+			 * enable5 is separate from the rest of the registers,
+			 * and therefore the address skip is 4
+			 * and not 16 between the different groups
+			 */
+			bp->attn_group[index].sig[4] = REG_RD(bp,
+					reg_offset + 0x10 + 0x4*index);
+		else
+			bp->attn_group[index].sig[4] = 0;
+	}
+
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
+				     HC_REG_ATTN_MSG0_ADDR_L);
+
+		REG_WR(bp, reg_offset, U64_LO(section));
+		REG_WR(bp, reg_offset + 4, U64_HI(section));
+	} else if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
+		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
+	}
+
+	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
+					    sp_sb);
+
+	bnx2x_zero_sp_sb(bp);
+
+	sp_sb_data.state		= SB_ENABLED;
+	sp_sb_data.host_sb_addr.lo	= U64_LO(section);
+	sp_sb_data.host_sb_addr.hi	= U64_HI(section);
+	sp_sb_data.igu_sb_id		= igu_sp_sb_index;
+	sp_sb_data.igu_seg_id		= igu_seg_id;
+	sp_sb_data.p_func.pf_id		= func;
+	sp_sb_data.p_func.vnic_id	= BP_VN(bp);
+	sp_sb_data.p_func.vf_id		= 0xff;
+
+	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
+
+	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
+}
+
+void bnx2x_update_coalesce(struct bnx2x *bp)
+{
+	int i;
+
+	for_each_eth_queue(bp, i)
+		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
+					 bp->tx_ticks, bp->rx_ticks);
+}
+
+static void bnx2x_init_sp_ring(struct bnx2x *bp)
+{
+	spin_lock_init(&bp->spq_lock);
+	atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
+
+	bp->spq_prod_idx = 0;
+	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
+	bp->spq_prod_bd = bp->spq;
+	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
+}
+
+static void bnx2x_init_eq_ring(struct bnx2x *bp)
+{
+	int i;
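+	/* The last element of each page is a next-page pointer; chain it
+	 * to the following page, wrapping back to page 0 after the last
+	 * one - that is what the (i % NUM_EQ_PAGES) arithmetic yields.
+	 */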
+	for (i = 1; i <= NUM_EQ_PAGES; i++) {
+		union event_ring_elem *elem =
+			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
+
+		elem->next_page.addr.hi =
+			cpu_to_le32(U64_HI(bp->eq_mapping +
+				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
+		elem->next_page.addr.lo =
+			cpu_to_le32(U64_LO(bp->eq_mapping +
+				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
+	}
+	bp->eq_cons = 0;
+	bp->eq_prod = NUM_EQ_DESC;
+	bp->eq_cons_sb = BNX2X_EQ_INDEX;
+	/* we want a warning message before it gets rough... */
+	atomic_set(&bp->eq_spq_left,
+		min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
+}
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
+			 unsigned long rx_mode_flags,
+			 unsigned long rx_accept_flags,
+			 unsigned long tx_accept_flags,
+			 unsigned long ramrod_flags)
+{
+	struct bnx2x_rx_mode_ramrod_params ramrod_param;
+	int rc;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Prepare ramrod parameters */
+	ramrod_param.cid = 0;
+	ramrod_param.cl_id = cl_id;
+	ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
+	ramrod_param.func_id = BP_FUNC(bp);
+
+	ramrod_param.pstate = &bp->sp_state;
+	ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
+
+	ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
+	ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
+
+	set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
+
+	ramrod_param.ramrod_flags = ramrod_flags;
+	ramrod_param.rx_mode_flags = rx_mode_flags;
+
+	ramrod_param.rx_accept_flags = rx_accept_flags;
+	ramrod_param.tx_accept_flags = tx_accept_flags;
+
+	rc = bnx2x_config_rx_mode(bp, &ramrod_param);
+	if (rc < 0) {
+		BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
+		return;
+	}
+}
+
+/* called with netif_addr_lock_bh() */
+void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
+{
+	unsigned long rx_mode_flags = 0, ramrod_flags = 0;
+	unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
+
+#ifdef BCM_CNIC
+	if (!NO_FCOE(bp))
+		/* Configure rx_mode of FCoE Queue */
+		__set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
+#endif
+
+	switch (bp->rx_mode) {
+	case BNX2X_RX_MODE_NONE:
+		/*
+		 * 'drop all' supersedes any accept flags that may have been
+		 * passed to the function.
+		 */
+		break;
+	case BNX2X_RX_MODE_NORMAL:
+		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_MULTICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+		break;
+	case BNX2X_RX_MODE_ALLMULTI:
+		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+		break;
+	case BNX2X_RX_MODE_PROMISC:
+		/* According to the definition of SI mode, an interface in
+		 * promisc mode should receive matched and unmatched (in
+		 * resolution of the port) unicast packets.
+		 */
+		__set_bit(BNX2X_ACCEPT_UNMATCHED, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_UNICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &rx_accept_flags);
+
+		/* internal switching mode */
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &tx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &tx_accept_flags);
+
+		if (IS_MF_SI(bp))
+			__set_bit(BNX2X_ACCEPT_ALL_UNICAST, &tx_accept_flags);
+		else
+			__set_bit(BNX2X_ACCEPT_UNICAST, &tx_accept_flags);
+
+		break;
+	default:
+		BNX2X_ERR("Unknown rx_mode: %d\n", bp->rx_mode);
+		return;
+	}
+
+	if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &rx_accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &tx_accept_flags);
+	}
+
+	__set_bit(RAMROD_RX, &ramrod_flags);
+	__set_bit(RAMROD_TX, &ramrod_flags);
+
+	bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags, rx_accept_flags,
+			    tx_accept_flags, ramrod_flags);
+}
+
+static void bnx2x_init_internal_common(struct bnx2x *bp)
+{
+	int i;
+
+	if (IS_MF_SI(bp))
+		/*
+		 * In switch independent mode, the TSTORM needs to accept
+		 * packets that failed classification, since approximate match
+		 * mac addresses aren't written to NIG LLH
+		 */
+		REG_WR8(bp, BAR_TSTRORM_INTMEM +
+			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+	else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
+		REG_WR8(bp, BAR_TSTRORM_INTMEM +
+			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
+
+	/* Zero this manually as its initialization is
+	   currently missing in the initTool */
+	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
+		REG_WR(bp, BAR_USTRORM_INTMEM +
+		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
+	if (!CHIP_IS_E1x(bp)) {
+		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
+			CHIP_INT_MODE_IS_BC(bp) ?
+			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
+	}
+}
+
+static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
+{
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+		bnx2x_init_internal_common(bp);
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		/* nothing to do */
+		/* no break */
+
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		/* internal memory per function is
+		   initialized inside bnx2x_pf_init */
+		break;
+
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		break;
+	}
+}
+
+static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
+{
+	return fp->bp->igu_base_sb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
+{
+	return fp->bp->base_fw_ndsb + fp->index + CNIC_PRESENT;
+}
+
+static inline u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
+{
+	if (CHIP_IS_E1x(fp->bp))
+		return BP_L_ID(fp->bp) + fp->index;
+	else	/* We want Client ID to be the same as IGU SB ID for 57712 */
+		return bnx2x_fp_igu_sb_id(fp);
+}
+
+static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
+	u8 cos;
+	unsigned long q_type = 0;
+	u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
+
+	fp->cid = fp_idx;
+	fp->cl_id = bnx2x_fp_cl_id(fp);
+	fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
+	fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
+	/* qZone id equals to FW (per path) client id */
+	fp->cl_qzone_id  = bnx2x_fp_qzone_id(fp);
+
+	/* init shortcut */
+	fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+	/* Setup SB indices */
+	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
+
+	/* Configure Queue State object */
+	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
+	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
+
+	BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
+
+	/* init tx data */
+	for_each_cos_in_tx_queue(fp, cos) {
+		bnx2x_init_txdata(bp, &fp->txdata[cos],
+				  CID_COS_TO_TX_ONLY_CID(fp->cid, cos),
+				  FP_COS_TO_TXQ(fp, cos),
+				  BNX2X_TX_SB_INDEX_BASE + cos);
+		cids[cos] = fp->txdata[cos].cid;
+	}
+
+	bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos,
+			     BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
+			     bnx2x_sp_mapping(bp, q_rdata), q_type);
+
+	/* Configure classification DBs: Always enable Tx switching */
+	bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
+
+	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
+				   "cl_id %d  fw_sb %d  igu_sb %d\n",
+		   fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
+		   fp->igu_sb_id);
+	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
+		      fp->fw_sb_id, fp->igu_sb_id);
+
+	bnx2x_update_fpsb_idx(fp);
+}
+
+void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
+{
+	int i;
+
+	for_each_eth_queue(bp, i)
+		bnx2x_init_eth_fp(bp, i);
+#ifdef BCM_CNIC
+	if (!NO_FCOE(bp))
+		bnx2x_init_fcoe_fp(bp);
+
+	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
+		      BNX2X_VF_ID_INVALID, false,
+		      bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
+
+#endif
+
+	/* Initialize MOD_ABS interrupts */
+	bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
+			       bp->common.shmem_base, bp->common.shmem2_base,
+			       BP_PORT(bp));
+	/* ensure status block indices were read */
+	rmb();
+
+	bnx2x_init_def_sb(bp);
+	bnx2x_update_dsb_idx(bp);
+	bnx2x_init_rx_rings(bp);
+	bnx2x_init_tx_rings(bp);
+	bnx2x_init_sp_ring(bp);
+	bnx2x_init_eq_ring(bp);
+	bnx2x_init_internal(bp, load_code);
+	bnx2x_pf_init(bp);
+	bnx2x_stats_init(bp);
+
+	/* flush all before enabling interrupts */
+	mb();
+	mmiowb();
+
+	bnx2x_int_enable(bp);
+
+	/* Check for SPIO5 */
+	bnx2x_attn_int_deasserted0(bp,
+		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
+				   AEU_INPUTS_ATTN_BITS_SPIO5);
+}
+
+/* end of nic init */
+
+/*
+ * gzip service functions
+ */
+
+static int bnx2x_gunzip_init(struct bnx2x *bp)
+{
+	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+					    &bp->gunzip_mapping, GFP_KERNEL);
+	if (bp->gunzip_buf  == NULL)
+		goto gunzip_nomem1;
+
+	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
+	if (bp->strm  == NULL)
+		goto gunzip_nomem2;
+
+	bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
+	if (bp->strm->workspace == NULL)
+		goto gunzip_nomem3;
+
+	return 0;
+
+gunzip_nomem3:
+	kfree(bp->strm);
+	bp->strm = NULL;
+
+gunzip_nomem2:
+	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+			  bp->gunzip_mapping);
+	bp->gunzip_buf = NULL;
+
+gunzip_nomem1:
+	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+	       " decompression\n");
+	return -ENOMEM;
+}
+
+static void bnx2x_gunzip_end(struct bnx2x *bp)
+{
+	if (bp->strm) {
+		vfree(bp->strm->workspace);
+		kfree(bp->strm);
+		bp->strm = NULL;
+	}
+
+	if (bp->gunzip_buf) {
+		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+				  bp->gunzip_mapping);
+		bp->gunzip_buf = NULL;
+	}
+}
+
+static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
+{
+	int n, rc;
+
+	/* check gzip header */
+	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
+		BNX2X_ERR("Bad gzip header\n");
+		return -EINVAL;
+	}
+
+	n = 10;
+
+#define FNAME				0x8
+
+	if (zbuf[3] & FNAME)
+		while ((zbuf[n++] != 0) && (n < len));
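+
+	/* Per RFC 1952 the fixed gzip header is 10 bytes (ID1 0x1f, ID2
+	 * 0x8b, CM, FLG, MTIME, XFL, OS); when FLG.FNAME (0x8) is set it
+	 * is followed by a NUL-terminated file name, skipped above, before
+	 * the raw deflate stream handed to zlib begins.
+	 */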
+
+	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
+	bp->strm->avail_in = len - n;
+	bp->strm->next_out = bp->gunzip_buf;
+	bp->strm->avail_out = FW_BUF_SIZE;
+
+	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
+	if (rc != Z_OK)
+		return rc;
+
+	rc = zlib_inflate(bp->strm, Z_FINISH);
+	if ((rc != Z_OK) && (rc != Z_STREAM_END))
+		netdev_err(bp->dev, "Firmware decompression error: %s\n",
+			   bp->strm->msg);
+
+	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
+	if (bp->gunzip_outlen & 0x3)
+		netdev_err(bp->dev, "Firmware decompression error:"
+				    " gunzip_outlen (%d) not aligned\n",
+				bp->gunzip_outlen);
+	bp->gunzip_outlen >>= 2;
+
+	zlib_inflateEnd(bp->strm);
+
+	if (rc == Z_STREAM_END)
+		return 0;
+
+	return rc;
+}
+
+/* nic load/unload */
+
+/*
+ * General service functions
+ */
+
+/* send a NIG loopback debug packet */
+static void bnx2x_lb_pckt(struct bnx2x *bp)
+{
+	u32 wb_write[3];
+
+	/* Ethernet source and destination addresses */
+	wb_write[0] = 0x55555555;
+	wb_write[1] = 0x55555555;
+	wb_write[2] = 0x20;		/* SOP */
+	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+
+	/* NON-IP protocol */
+	wb_write[0] = 0x09000000;
+	wb_write[1] = 0x55555555;
+	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
+	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
+}
+
+/* Some of the internal memories are not directly readable from the
+ * driver. To test them we send debug packets.
+ */
+static int bnx2x_int_mem_test(struct bnx2x *bp)
+{
+	int factor;
+	int count, i;
+	u32 val = 0;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		factor = 120;
+	else if (CHIP_REV_IS_EMUL(bp))
+		factor = 200;
+	else
+		factor = 1;
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+	/*  Write 0 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
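+	/* With zero CFC search credits the parser should hold on to the
+	 * packet instead of forwarding it, so the BRB/NIG counters sampled
+	 * below can be checked deterministically.
+	 */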
+
+	/* send Ethernet packet */
+	bnx2x_lb_pckt(bp);
+
+	/* TODO: do we need to reset the NIG statistics here? */
+	/* Wait until NIG register shows 1 packet of size 0x10 */
+	count = 1000 * factor;
+	while (count) {
+
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+		if (val == 0x10)
+			break;
+
+		msleep(10);
+		count--;
+	}
+	if (val != 0x10) {
+		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+		return -1;
+	}
+
+	/* Wait until PRS register shows 1 packet */
+	count = 1000 * factor;
+	while (count) {
+		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+		if (val == 1)
+			break;
+
+		msleep(10);
+		count--;
+	}
+	if (val != 0x1) {
+		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
+		return -2;
+	}
+
+	/* Reset and init BRB, PRS */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+	msleep(50);
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+
+	DP(NETIF_MSG_HW, "part2\n");
+
+	/* Disable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
+
+	/* Write 0 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
+
+	/* send 10 Ethernet packets */
+	for (i = 0; i < 10; i++)
+		bnx2x_lb_pckt(bp);
+
+	/* Wait until NIG register shows 10 + 1
+	   packets of size 11*0x10 = 0xb0 */
+	count = 1000 * factor;
+	while (count) {
+
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+		if (val == 0xb0)
+			break;
+
+		msleep(10);
+		count--;
+	}
+	if (val != 0xb0) {
+		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
+		return -3;
+	}
+
+	/* Wait until PRS register shows 2 packets */
+	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+	if (val != 2)
+		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
+
+	/* Write 1 to parser credits for CFC search request */
+	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
+
+	/* Wait until PRS register shows 3 packets */
+	msleep(10 * factor);
+	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
+	if (val != 3)
+		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
+
+	/* clear NIG EOP FIFO */
+	for (i = 0; i < 11; i++)
+		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
+	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
+	if (val != 1) {
+		BNX2X_ERR("clear of NIG failed\n");
+		return -4;
+	}
+
+	/* Reset and init BRB, PRS, NIG */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
+	msleep(50);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
+	msleep(50);
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+#ifndef BCM_CNIC
+	/* set NIC mode */
+	REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif
+
+	/* Enable inputs of parser neighbor blocks */
+	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
+	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
+	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
+	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
+
+	DP(NETIF_MSG_HW, "done\n");
+
+	return 0; /* OK */
+}
+
+static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
+{
+	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
+	else
+		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
+	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+	/*
+	 * Mask read length error interrupts in BRB for the parser
+	 * (parsing unit and 'checksum and crc' unit); these errors are
+	 * legal (the PU reads a fixed length and the CAC can cause a read
+	 * length error on truncated packets).
+	 */
+	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
+	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
+	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
+	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
+	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
+	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
+/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
+	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
+	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
+	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
+/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
+	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
+	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
+	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
+	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
+/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
+/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
+
+	if (CHIP_REV_IS_FPGA(bp))
+		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
+	else if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
+			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
+				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
+				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
+				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
+				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
+	else
+		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
+	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
+	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
+	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
+/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
+
+	if (!CHIP_IS_E1x(bp))
+		/* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
+		REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
+
+	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
+	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
+/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
+	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
+}
+
+static void bnx2x_reset_common(struct bnx2x *bp)
+{
+	u32 val = 0x1400;
+
+	/* reset_common */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       0xd3ffff7f);
+
+	if (CHIP_IS_E3(bp)) {
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+	}
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
+}
+
+static void bnx2x_setup_dmae(struct bnx2x *bp)
+{
+	bp->dmae_ready = 0;
+	spin_lock_init(&bp->dmae_lock);
+}
+
+static void bnx2x_init_pxp(struct bnx2x *bp)
+{
+	u16 devctl;
+	int r_order, w_order;
+
+	pci_read_config_word(bp->pdev,
+			     pci_pcie_cap(bp->pdev) + PCI_EXP_DEVCTL, &devctl);
+	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
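+	/* PCI_EXP_DEVCTL_PAYLOAD occupies bits 7:5 of the Device Control
+	 * register and PCI_EXP_DEVCTL_READRQ bits 14:12, hence the shifts.
+	 */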
+	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+	if (bp->mrrs == -1)
+		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+	else {
+		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
+		r_order = bp->mrrs;
+	}
+
+	bnx2x_init_pxp_arb(bp, r_order, w_order);
+}
+
+static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
+{
+	int is_required;
+	u32 val;
+	int port;
+
+	if (BP_NOMCP(bp))
+		return;
+
+	is_required = 0;
+	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
+	      SHARED_HW_CFG_FAN_FAILURE_MASK;
+
+	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
+		is_required = 1;
+
+	/*
+	 * The fan failure mechanism is usually related to the PHY type since
+	 * the power consumption of the board is affected by the PHY.
+	 * Currently, a fan is required for most designs with SFX7101, BCM8727
+	 * and BCM8481.
+	 */
+	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
+		for (port = PORT_0; port < PORT_MAX; port++) {
+			is_required |=
+				bnx2x_fan_failure_det_req(
+					bp,
+					bp->common.shmem_base,
+					bp->common.shmem2_base,
+					port);
+		}
+
+	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
+
+	if (is_required == 0)
+		return;
+
+	/* Fan failure is indicated by SPIO 5 */
+	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
+		       MISC_REGISTERS_SPIO_INPUT_HI_Z);
+
+	/* set to active low mode */
+	val = REG_RD(bp, MISC_REG_SPIO_INT);
+	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
+					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+	REG_WR(bp, MISC_REG_SPIO_INT, val);
+
+	/* enable interrupt to signal the IGU */
+	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+	val |= (1 << MISC_REGISTERS_SPIO_5);
+	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
+}
+
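+/* Make subsequent GRC accesses be issued on behalf of another absolute
+ * function (used e.g. by the E2 timers-bug workaround below); callers
+ * pretend back to BP_ABS_FUNC(bp) when done.
+ */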
+static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
+{
+	u32 offset = 0;
+
+	if (CHIP_IS_E1(bp))
+		return;
+	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
+		return;
+
+	switch (BP_ABS_FUNC(bp)) {
+	case 0:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
+		break;
+	case 1:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
+		break;
+	case 2:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
+		break;
+	case 3:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
+		break;
+	case 4:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
+		break;
+	case 5:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
+		break;
+	case 6:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
+		break;
+	case 7:
+		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
+		break;
+	default:
+		return;
+	}
+
+	REG_WR(bp, offset, pretend_func_num);
+	REG_RD(bp, offset);
+	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
+}
+
+void bnx2x_pf_disable(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
+	val &= ~IGU_PF_CONF_FUNC_EN;
+
+	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
+	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
+}
+
+static inline void bnx2x__common_init_phy(struct bnx2x *bp)
+{
+	u32 shmem_base[2], shmem2_base[2];
+	shmem_base[0] =  bp->common.shmem_base;
+	shmem2_base[0] = bp->common.shmem2_base;
+	if (!CHIP_IS_E1x(bp)) {
+		shmem_base[1] =
+			SHMEM2_RD(bp, other_shmem_base_addr);
+		shmem2_base[1] =
+			SHMEM2_RD(bp, other_shmem2_base_addr);
+	}
+	bnx2x_acquire_phy_lock(bp);
+	bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
+			      bp->common.chip_id);
+	bnx2x_release_phy_lock(bp);
+}
+
+/**
+ * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
+ *
+ * @bp:		driver handle
+ */
+static int bnx2x_init_hw_common(struct bnx2x *bp)
+{
+	u32 val;
+
+	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));
+
+	bnx2x_reset_common(bp);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
+
+	val = 0xfffc;
+	if (CHIP_IS_E3(bp)) {
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+		val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+	}
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
+
+	bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp)) {
+		u8 abs_func_id;
+
+		/*
+		 * In 4-port or 2-port mode we need to turn off master-enable
+		 * for everyone and afterwards turn it back on for ourselves.
+		 * So regardless of multi-function mode, we always disable it
+		 * for all functions on the given path: 0,2,4,6 for path 0
+		 * and 1,3,5,7 for path 1.
+		 */
+		for (abs_func_id = BP_PATH(bp);
+		     abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
+			if (abs_func_id == BP_ABS_FUNC(bp)) {
+				REG_WR(bp,
+				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
+				    1);
+				continue;
+			}
+
+			bnx2x_pretend_func(bp, abs_func_id);
+			/* clear pf enable */
+			bnx2x_pf_disable(bp);
+			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+		}
+	}
+
+	bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
+	if (CHIP_IS_E1(bp)) {
+		/* enable HW interrupt from PXP on USDM overflow
+		   bit 16 on INT_MASK_0 */
+		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
+	}
+
+	bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
+	bnx2x_init_pxp(bp);
+
+#ifdef __BIG_ENDIAN
+	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
+	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
+	/* make sure this value is 0 */
+	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
+
+/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
+	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
+	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
+	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
+	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
+#endif
+
+	bnx2x_ilt_init_page_size(bp, INITOP_SET);
+
+	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
+		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
+
+	/* let the HW do its magic ... */
+	msleep(100);
+	/* finish PXP init */
+	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
+	if (val != 1) {
+		BNX2X_ERR("PXP2 CFG failed\n");
+		return -EBUSY;
+	}
+	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
+	if (val != 1) {
+		BNX2X_ERR("PXP2 RD_INIT failed\n");
+		return -EBUSY;
+	}
+
+	/* Timers bug workaround E2 only. We need to set the entire ILT to
+	 * have entries with value "0" and valid bit on.
+	 * This needs to be done by the first PF that is loaded in a path
+	 * (i.e. common phase)
+	 */
+	if (!CHIP_IS_E1x(bp)) {
+/* In E2 there is a bug in the timers block that can cause function 6 / 7
+ * (i.e. vnic3) to start even if it is marked as "scan-off".
+ * This occurs when a different function (func2,3) is being marked
+ * as "scan-off". Real-life scenario for example: if a driver is being
+ * load-unloaded while func6,7 are down. This will cause the timer to access
+ * the ilt, translate to a logical address and send a request to read/write.
+ * Since the ilt for the function that is down is not valid, this will cause
+ * a translation error which is unrecoverable.
+ * The workaround is intended to make sure that when this happens nothing fatal
+ * will occur. The workaround:
+ *	1.  First PF driver which loads on a path will:
+ *		a.  After taking the chip out of reset, by using pretend,
+ *		    it will write "0" to the following registers of
+ *		    the other vnics.
+ *		    REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
+ *		    REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
+ *		    REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
+ *		    And for itself it will write '1' to
+ *		    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
+ *		    dmae-operations (writing to pram for example.)
+ *		    note: can be done for only function 6,7 but cleaner this
+ *			  way.
+ *		b.  Write zero+valid to the entire ILT.
+ *		c.  Init the first_timers_ilt_entry, last_timers_ilt_entry of
+ *		    VNIC3 (of that port). The range allocated will be the
+ *		    entire ILT. This is needed to prevent an ILT range error.
+ *	2.  Any PF driver load flow:
+ *		a.  ILT update with the physical addresses of the allocated
+ *		    logical pages.
+ *		b.  Wait 20msec. - note that this timeout is needed to make
+ *		    sure there are no requests in one of the PXP internal
+ *		    queues with "old" ILT addresses.
+ *		c.  PF enable in the PGLC.
+ *		d.  Clear the was_error of the PF in the PGLC. (could have
+ *		    occurred while the driver was down)
+ *		e.  PF enable in the CFC (WEAK + STRONG)
+ *		f.  Timers scan enable
+ *	3.  PF driver unload flow:
+ *		a.  Clear the Timers scan_en.
+ *		b.  Polling for scan_on=0 for that PF.
+ *		c.  Clear the PF enable bit in the PXP.
+ *		d.  Clear the PF enable in the CFC (WEAK + STRONG)
+ *		e.  Write zero+valid to all ILT entries (The valid bit must
+ *		    stay set)
+ *		f.  If this is VNIC 3 of a port then also init
+ *		    first_timers_ilt_entry to zero and last_timers_ilt_entry
+ *		    to the last entry in the ILT.
+ *
+ *	Notes:
+ *	Currently the PF error in the PGLC is non-recoverable.
+ *	In the future there will be a recovery routine for this error.
+ *	Currently attention is masked.
+ *	Having an MCP lock on the load/unload process does not guarantee that
+ *	there is no Timer disable during Func6/7 enable. This is because the
+ *	Timers scan is currently being cleared by the MCP on FLR.
+ *	Step 2.d can be done only for PF6/7 and the driver can also check if
+ *	there is an error before clearing it. But the flow above is simpler and
+ *	more general.
+ *	All ILT entries are written by zero+valid and not just PF6/7
+ *	ILT entries since in the future the ILT entries allocation for
+ *	PF-s might be dynamic.
+ */
+		struct ilt_client_info ilt_cli;
+		struct bnx2x_ilt ilt;
+		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+		memset(&ilt, 0, sizeof(struct bnx2x_ilt));
+
+		/* initialize dummy TM client */
+		ilt_cli.start = 0;
+		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+		ilt_cli.client_num = ILT_CLIENT_TM;
+
+		/* Step 1: set zeroes to all ilt page entries with valid bit on
+		 * Step 2: set the timers first/last ilt entry to point
+		 * to the entire range to prevent ILT range error for 3rd/4th
+		 * vnic (this code assumes existence of the vnic)
+		 *
+		 * both steps performed by call to bnx2x_ilt_client_init_op()
+		 * with dummy TM client
+		 *
+		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
+		 * and its counterpart are split registers
+		 */
+		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
+		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
+		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
+
+		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
+		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
+		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
+	}
+
+
+	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
+	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
+
+	if (!CHIP_IS_E1x(bp)) {
+		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
+				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
+		bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
+
+		bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
+
+		/* let the HW do its magic ... */
+		do {
+			msleep(200);
+			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
+		} while (factor-- && (val != 1));
+
+		if (val != 1) {
+			BNX2X_ERR("ATC_INIT failed\n");
+			return -EBUSY;
+		}
+	}
+
+	bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
+
+	/* clean the DMAE memory */
+	bp->dmae_ready = 1;
+	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
+
+	bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
+
+	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
+	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
+
+	bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
+
+
+	/* QM queues pointers table */
+	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
+
+	/* soft reset pulse */
+	REG_WR(bp, QM_REG_SOFT_RESET, 1);
+	REG_WR(bp, QM_REG_SOFT_RESET, 0);
+
+#ifdef BCM_CNIC
+	bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
+#endif
+
+	bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
+	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
+	if (!CHIP_REV_IS_SLOW(bp))
+		/* enable hw interrupt from doorbell Q */
+		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
+
+	bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
+
+	bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
+	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
+
+	if (!CHIP_IS_E1(bp))
+		REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
+
+	if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp))
+		/* Bit-map indicating which L2 hdrs may appear
+		 * after the basic Ethernet header
+		 */
+		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
+		       bp->path_has_ovlan ? 7 : 6);
+
+	bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp)) {
+		/* reset VFC memories */
+		REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+			   VFC_MEMORIES_RST_REG_CAM_RST |
+			   VFC_MEMORIES_RST_REG_RAM_RST);
+		REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
+			   VFC_MEMORIES_RST_REG_CAM_RST |
+			   VFC_MEMORIES_RST_REG_RAM_RST);
+
+		msleep(20);
+	}
+
+	bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
+
+	/* sync semi rtc */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       0x80000000);
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+	       0x80000000);
+
+	bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
+		       bp->path_has_ovlan ? 7 : 6);
+
+	REG_WR(bp, SRC_REG_SOFT_RST, 1);
+
+	bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
+
+#ifdef BCM_CNIC
+	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
+	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
+	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
+	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
+	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
+	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
+	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
+	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
+	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
+#endif
+	REG_WR(bp, SRC_REG_SOFT_RST, 0);
+
+	if (sizeof(union cdu_context) != 1024)
+		/* we currently assume that a context is 1024 bytes */
+		dev_alert(&bp->pdev->dev, "please adjust the size "
+					  "of cdu_context(%ld)\n",
+			 (long)sizeof(union cdu_context));
+
+	bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
+	val = (4 << 24) + (0 << 12) + 1024;
+	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
+
+	bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
+	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
+	/* enable context validation interrupt from CFC */
+	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
+
+	/* set the thresholds to prevent CFC/CDU race */
+	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
+
+	bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
+
+	if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
+		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
+
+	bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
+
+	/* Reset PCIE errors for debug */
+	REG_WR(bp, 0x2814, 0xffffffff);
+	REG_WR(bp, 0x3820, 0xffffffff);
+
+	if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
+			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
+				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
+		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
+			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
+				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
+				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
+		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
+			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
+				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
+				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
+	}
+
+	bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
+	if (!CHIP_IS_E1(bp)) {
+		/* in E3 this is done in the per-port section */
+		if (!CHIP_IS_E3(bp))
+			REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
+	}
+	if (CHIP_IS_E1H(bp))
+		/* not applicable for E2 (and above ...) */
+		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
+
+	if (CHIP_REV_IS_SLOW(bp))
+		msleep(200);
+
+	/* finish CFC init */
+	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC LL_INIT failed\n");
+		return -EBUSY;
+	}
+	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC AC_INIT failed\n");
+		return -EBUSY;
+	}
+	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
+	if (val != 1) {
+		BNX2X_ERR("CFC CAM_INIT failed\n");
+		return -EBUSY;
+	}
+	REG_WR(bp, CFC_REG_DEBUG0, 0);
+
+	if (CHIP_IS_E1(bp)) {
+		/* read NIG statistic
+		   to see if this is our first up since powerup */
+		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
+		val = *bnx2x_sp(bp, wb_data[0]);
+
+		/* do internal memory self test */
+		if ((val == 0) && bnx2x_int_mem_test(bp)) {
+			BNX2X_ERR("internal mem self test failed\n");
+			return -EBUSY;
+		}
+	}
+
+	bnx2x_setup_fan_failure_detection(bp);
+
+	/* clear PXP2 attentions */
+	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
+
+	bnx2x_enable_blocks_attention(bp);
+	bnx2x_enable_blocks_parity(bp);
+
+	if (!BP_NOMCP(bp)) {
+		if (CHIP_IS_E1x(bp))
+			bnx2x__common_init_phy(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not initialize link\n");
+
+	return 0;
+}
+
+/**
+ * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
+ *
+ * @bp:		driver handle
+ */
+static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
+{
+	int rc = bnx2x_init_hw_common(bp);
+
+	if (rc)
+		return rc;
+
+	/* In E2 2-PORT mode, same ext phy is used for the two paths */
+	if (!BP_NOMCP(bp))
+		bnx2x__common_init_phy(bp);
+
+	return 0;
+}
+
+static int bnx2x_init_hw_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
+	u32 low, high;
+	u32 val;
+
+	bnx2x__link_reset(bp);
+
+	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
+
+	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+	/* The Timers bug workaround disables the pf_master bit in pglue at
+	 * the common phase, so we need to enable it here before any DMAE
+	 * access is attempted. Therefore we manually add the enable-master
+	 * in the port phase (it also happens in the function phase).
+	 */
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+
+	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+	bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+
+	/* QM cid (connection) count */
+	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
+
+#ifdef BCM_CNIC
+	bnx2x_init_block(bp, BLOCK_TM, init_phase);
+	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
+	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
+#endif
+
+	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+
+	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
+		bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+
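+		/* The pause thresholds below are expressed in 256-byte
+		 * BRB blocks (note the /256 in the comments).
+		 */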
+		if (IS_MF(bp))
+			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
+		else if (bp->dev->mtu > 4096) {
+			if (bp->flags & ONE_PORT_FLAG)
+				low = 160;
+			else {
+				val = bp->dev->mtu;
+				/* (24*1024 + val*4)/256 */
+				low = 96 + (val/64) +
+						((val % 64) ? 1 : 0);
+			}
+		} else
+			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
+		high = low + 56;	/* 14*1024/256 */
+		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
+		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
+	}
+
+	if (CHIP_MODE_IS_4_PORT(bp))
+		REG_WR(bp, (BP_PORT(bp) ?
+			    BRB1_REG_MAC_GUARANTIED_1 :
+			    BRB1_REG_MAC_GUARANTIED_0), 40);
+
+
+	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+	if (CHIP_IS_E3B0(bp))
+		/* Ovlan headers exist only in multi-function
+		 * switch-dependent mode; in switch-independent mode
+		 * there are no ovlan headers.
+		 */
+		REG_WR(bp, BP_PORT(bp) ?
+		       PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
+		       PRS_REG_HDRS_AFTER_BASIC_PORT_0,
+		       (bp->path_has_ovlan ? 7 : 6));
+
+	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+
+	if (CHIP_IS_E1x(bp)) {
+		/* configure PBF to work without PAUSE, MTU 9000 */
+		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
+
+		/* update threshold */
+		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
+		/* update init credit */
+		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
+
+		/* probe changes */
+		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
+		udelay(50);
+		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
+	}
+
+#ifdef BCM_CNIC
+	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+#endif
+	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+	if (CHIP_IS_E1(bp)) {
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+	}
+	bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+	/* init aeu_mask_attn_func_0/1:
+	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
+	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
+	 *             bits 4-7 are used for "per vn group attention" */
+	val = IS_MF(bp) ? 0xF7 : 0x7;
+	/* Enable DCBX attention for all but E1 */
+	val |= CHIP_IS_E1(bp) ? 0 : 0x10;
+	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
+
+	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+
+	if (!CHIP_IS_E1x(bp)) {
+		/* Bit-map indicating which L2 hdrs may appear after the
+		 * basic Ethernet header
+		 */
+		REG_WR(bp, BP_PORT(bp) ?
+			   NIG_REG_P1_HDRS_AFTER_BASIC :
+			   NIG_REG_P0_HDRS_AFTER_BASIC,
+			   IS_MF_SD(bp) ? 7 : 6);
+
+		if (CHIP_IS_E3(bp))
+			REG_WR(bp, BP_PORT(bp) ?
+				   NIG_REG_LLH1_MF_MODE :
+				   NIG_REG_LLH_MF_MODE, IS_MF(bp));
+	}
+	if (!CHIP_IS_E3(bp))
+		REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
+
+	if (!CHIP_IS_E1(bp)) {
+		/* 0x2 disable mf_ov, 0x1 enable */
+		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
+		       (IS_MF_SD(bp) ? 0x1 : 0x2));
+
+		if (!CHIP_IS_E1x(bp)) {
+			val = 0;
+			switch (bp->mf_mode) {
+			case MULTI_FUNCTION_SD:
+				val = 1;
+				break;
+			case MULTI_FUNCTION_SI:
+				val = 2;
+				break;
+			}
+
+			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
+						  NIG_REG_LLH0_CLS_TYPE), val);
+		}
+		{
+			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
+			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
+			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
+		}
+	}
+
+
+	/* If SPIO5 is set to generate interrupts, enable it for this port */
+	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
+	if (val & (1 << MISC_REGISTERS_SPIO_5)) {
+		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
+				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
+		val = REG_RD(bp, reg_addr);
+		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
+		REG_WR(bp, reg_addr, val);
+	}
+
+	return 0;
+}
+
+static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
+{
+	int reg;
+
+	if (CHIP_IS_E1(bp))
+		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
+	else
+		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
+
+	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
+}
+
+static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
+{
+	bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
+}
+
+static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
+{
+	u32 i, base = FUNC_ILT_BASE(func);
+	for (i = base; i < base + ILT_PER_FUNC; i++)
+		bnx2x_ilt_wr(bp, i, 0);
+}
+
+static int bnx2x_init_hw_func(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int init_phase = PHASE_PF0 + func;
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	u16 cdu_ilt_start;
+	u32 addr, val;
+	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
+	int i, main_mem_width;
+
+	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
+
+	/* FLR cleanup - hmmm */
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_pf_flr_clnup(bp);
+
+	/* set MSI reconfigure capability */
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
+		val = REG_RD(bp, addr);
+		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
+		REG_WR(bp, addr, val);
+	}
+
+	bnx2x_init_block(bp, BLOCK_PXP, init_phase);
+	bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
+
+	ilt = BP_ILT(bp);
+	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
+
+	for (i = 0; i < L2_ILT_LINES(bp); i++) {
+		ilt->lines[cdu_ilt_start + i].page =
+			bp->context.vcxt + (ILT_PAGE_CIDS * i);
+		ilt->lines[cdu_ilt_start + i].page_mapping =
+			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
+		/* cdu ilt pages are allocated manually so there's no need
+		 * to set the size
+		 */
+	}
+	bnx2x_ilt_init_op(bp, INITOP_SET);
+
+#ifdef BCM_CNIC
+	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
+
+	/* T1 hash bits value determines the T1 number of entries */
+	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
+#endif
+
+#ifndef BCM_CNIC
+	/* set NIC mode */
+	REG_WR(bp, PRS_REG_NIC_MODE, 1);
+#endif  /* BCM_CNIC */
+
+	if (!CHIP_IS_E1x(bp)) {
+		u32 pf_conf = IGU_PF_CONF_FUNC_EN;
+
+		/* Turn on a single ISR mode in IGU if driver is going to use
+		 * INT#x or MSI
+		 */
+		if (!(bp->flags & USING_MSIX_FLAG))
+			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
+		/*
+		 * Timers workaround bug: function init part.
+		 * Need to wait 20msec after initializing ILT,
+		 * needed to make sure there are no requests in
+		 * one of the PXP internal queues with "old" ILT addresses
+		 */
+		msleep(20);
+		/*
+		 * Master enable - Due to WB DMAE writes performed before this
+		 * register is re-initialized as part of the regular function
+		 * init
+		 */
+		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
+		/* Enable the function in IGU */
+		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
+	}
+
+	bp->dmae_ready = 1;
+
+	bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
+
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
+
+	bnx2x_init_block(bp, BLOCK_ATC, init_phase);
+	bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
+	bnx2x_init_block(bp, BLOCK_NIG, init_phase);
+	bnx2x_init_block(bp, BLOCK_SRC, init_phase);
+	bnx2x_init_block(bp, BLOCK_MISC, init_phase);
+	bnx2x_init_block(bp, BLOCK_TCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XCM, init_phase);
+	bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
+
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, QM_REG_PF_EN, 1);
+
+	if (!CHIP_IS_E1x(bp)) {
+		REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+		REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
+	}
+	bnx2x_init_block(bp, BLOCK_QM, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_TM, init_phase);
+	bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
+	bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
+	bnx2x_init_block(bp, BLOCK_PRS, init_phase);
+	bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_USDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
+	bnx2x_init_block(bp, BLOCK_UPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_XPB, init_phase);
+	bnx2x_init_block(bp, BLOCK_PBF, init_phase);
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, PBF_REG_DISABLE_PF, 0);
+
+	bnx2x_init_block(bp, BLOCK_CDU, init_phase);
+
+	bnx2x_init_block(bp, BLOCK_CFC, init_phase);
+
+	if (!CHIP_IS_E1x(bp))
+		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
+
+	if (IS_MF(bp)) {
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
+		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
+	}
+
+	bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
+
+	/* HC init per function */
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		if (CHIP_IS_E1H(bp)) {
+			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+		}
+		bnx2x_init_block(bp, BLOCK_HC, init_phase);
+
+	} else {
+		int num_segs, sb_idx, prod_offset;
+
+		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
+
+		if (!CHIP_IS_E1x(bp)) {
+			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+		}
+
+		bnx2x_init_block(bp, BLOCK_IGU, init_phase);
+
+		if (!CHIP_IS_E1x(bp)) {
+			int dsb_idx = 0;
+			/*
+			 * Producer memory:
+			 * E2 mode: address 0-135 match to the mapping memory;
+			 * 136 - PF0 default prod; 137 - PF1 default prod;
+			 * 138 - PF2 default prod; 139 - PF3 default prod;
+			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
+			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
+			 * 144-147 reserved.
+			 *
+			 * E1.5 mode - in backward-compatible mode:
+			 * for non-default SBs, each even line in the memory
+			 * holds the U producer and each odd line holds
+			 * the C producer. The first 128 producers are for
+			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+			 * producers are for the DSB for each PF.
+			 * Each PF has five segments: (the order inside each
+			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
+			 * 144-147 attn prods;
+			 */
+			/* non-default-status-blocks */
+			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
+			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
+				prod_offset = (bp->igu_base_sb + sb_idx) *
+					num_segs;
+
+				for (i = 0; i < num_segs; i++) {
+					addr = IGU_REG_PROD_CONS_MEMORY +
+							(prod_offset + i) * 4;
+					REG_WR(bp, addr, 0);
+				}
+				/* send consumer update with value 0 */
+				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
+					     USTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_igu_clear_sb(bp,
+						   bp->igu_base_sb + sb_idx);
+			}
+
+			/* default-status-blocks */
+			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
+				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
+
+			if (CHIP_MODE_IS_4_PORT(bp))
+				dsb_idx = BP_FUNC(bp);
+			else
+				dsb_idx = BP_E1HVN(bp);
+
+			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
+				       IGU_BC_BASE_DSB_PROD + dsb_idx :
+				       IGU_NORM_BASE_DSB_PROD + dsb_idx);
+
+			for (i = 0; i < (num_segs * E1HVN_MAX);
+			     i += E1HVN_MAX) {
+				addr = IGU_REG_PROD_CONS_MEMORY +
+							(prod_offset + i)*4;
+				REG_WR(bp, addr, 0);
+			}
+			/* send consumer update with 0 */
+			if (CHIP_INT_MODE_IS_BC(bp)) {
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     USTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     CSTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     XSTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     TSTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
+			} else {
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     USTORM_ID, 0, IGU_INT_NOP, 1);
+				bnx2x_ack_sb(bp, bp->igu_dsb_id,
+					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
+			}
+			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
+
+			/* !!! these should become driver const once
+			   rf-tool supports split-68 const */
+			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
+			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
+			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
+			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
+			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
+			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
+		}
+	}
+
+	/* Reset PCIE errors for debug */
+	REG_WR(bp, 0x2114, 0xffffffff);
+	REG_WR(bp, 0x2120, 0xffffffff);
+
+	if (CHIP_IS_E1x(bp)) {
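+		/* The HC main memory is split evenly between the two ports;
+		 * walk this port's half to clear stale MSI-X table parity.
+		 */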
+		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
+		main_mem_base = HC_REG_MAIN_MEMORY +
+				BP_PORT(bp) * (main_mem_size * 4);
+		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
+		main_mem_width = 8;
+
+		val = REG_RD(bp, main_mem_prty_clr);
+		if (val)
+			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
+					  "block during "
+					  "function init (0x%x)!\n", val);
+
+		/* Clear "false" parity errors in MSI-X table */
+		for (i = main_mem_base;
+		     i < main_mem_base + main_mem_size * 4;
+		     i += main_mem_width) {
+			bnx2x_read_dmae(bp, i, main_mem_width / 4);
+			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
+					 i, main_mem_width / 4);
+		}
+		/* Clear HC parity attention */
+		REG_RD(bp, main_mem_prty_clr);
+	}
+
+#ifdef BNX2X_STOP_ON_ERROR
+	/* Enable STORMs SP logging */
+	REG_WR8(bp, BAR_USTRORM_INTMEM +
+	       USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM +
+	       TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+	       CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+	REG_WR8(bp, BAR_XSTRORM_INTMEM +
+	       XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
+#endif
+
+	bnx2x_phy_probe(&bp->link_params);
+
+	return 0;
+}
+
+
+void bnx2x_free_mem(struct bnx2x *bp)
+{
+	/* fastpath */
+	bnx2x_free_fp_mem(bp);
+	/* end of fastpath */
+
+	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
+		       sizeof(struct host_sp_status_block));
+
+	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
+	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
+		       sizeof(struct bnx2x_slowpath));
+
+	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
+		       bp->context.size);
+
+	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
+
+	BNX2X_FREE(bp->ilt->lines);
+
+#ifdef BCM_CNIC
+	if (!CHIP_IS_E1x(bp))
+		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e2));
+	else
+		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
+			       sizeof(struct host_hc_status_block_e1x));
+
+	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
+
+	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
+		       BCM_PAGE_SIZE * NUM_EQ_PAGES);
+}
+
+static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
+{
+	int num_groups;
+
+	/* number of eth_queues */
+	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/* Total number of FW statistics requests =
+	 * 1 for port stats + 1 for PF stats + num_eth_queues */
+	bp->fw_stats_num = 2 + num_queue_stats;
+
+
+	/* Request is built from stats_query_header and an array of
+	 * stats_query_cmd_group each of which contains
+	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
+	 * configured in the stats_query_header.
+	 */
+	num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
+		(((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
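+	/* This is simply a round-up: e.g. 14 queues with a hypothetical
+	 * STATS_QUERY_CMD_COUNT of 16 give (2 + 14) / 16 = 1 group.
+	 */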
+
+	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
+			num_groups * sizeof(struct stats_query_cmd_group);
+
+	/* Data for statistics requests + stats_counter
+	 *
+	 * stats_counter holds per-STORM counters that are incremented
+	 * when STORM has finished with the current request.
+	 */
+	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
+		sizeof(struct per_pf_stats) +
+		sizeof(struct per_queue_stats) * num_queue_stats +
+		sizeof(struct stats_counter);
+
+	BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
+			bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+
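+	/* The single DMA block is laid out as [request][data]: fw_stats_req
+	 * at offset 0, fw_stats_data at offset fw_stats_req_sz.
+	 */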
+	/* Set shortcuts */
+	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
+	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
+
+	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
+		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
+
+	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
+				   bp->fw_stats_req_sz;
+	return 0;
+
+alloc_mem_err:
+	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
+		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+	return -ENOMEM;
+}
+
+
+int bnx2x_alloc_mem(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+	if (!CHIP_IS_E1x(bp))
+		/* size = the status block + ramrod buffers */
+		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
+				sizeof(struct host_hc_status_block_e2));
+	else
+		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
+				sizeof(struct host_hc_status_block_e1x));
+
+	/* allocate searcher T2 table */
+	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+#endif
+
+
+	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
+			sizeof(struct host_sp_status_block));
+
+	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
+			sizeof(struct bnx2x_slowpath));
+
+	/* Allocate memory for FW statistics */
+	if (bnx2x_alloc_fw_stats_mem(bp))
+		goto alloc_mem_err;
+
+	bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
+
+	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
+			bp->context.size);
+
+	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+
+	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
+		goto alloc_mem_err;
+
+	/* Slow path ring */
+	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+
+	/* EQ */
+	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
+			BCM_PAGE_SIZE * NUM_EQ_PAGES);
+
+
+	/* fastpath */
+	/* need to be done at the end, since it's self adjusting to amount
+	 * of memory available for RSS queues
+	 */
+	if (bnx2x_alloc_fp_mem(bp))
+		goto alloc_mem_err;
+	return 0;
+
+alloc_mem_err:
+	bnx2x_free_mem(bp);
+	return -ENOMEM;
+}
+
+/*
+ * Init service functions
+ */
+
+int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+		      struct bnx2x_vlan_mac_obj *obj, bool set,
+		      int mac_type, unsigned long *ramrod_flags)
+{
+	int rc;
+	struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+
+	memset(&ramrod_param, 0, sizeof(ramrod_param));
+
+	/* Fill general parameters */
+	ramrod_param.vlan_mac_obj = obj;
+	ramrod_param.ramrod_flags = *ramrod_flags;
+
+	/* Fill a user request section if needed */
+	if (!test_bit(RAMROD_CONT, ramrod_flags)) {
+		memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
+
+		__set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
+
+		/* Set the command: ADD or DEL */
+		if (set)
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+		else
+			ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+	}
+
+	rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+	if (rc < 0)
+		BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
+	return rc;
+}
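+/* Example usage: bnx2x_set_eth_mac() below calls this with RAMROD_COMP_WAIT
+ * set to synchronously add the device's primary MAC on the leading client.
+ */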
+
+int bnx2x_del_all_macs(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_obj *mac_obj,
+		       int mac_type, bool wait_for_comp)
+{
+	int rc;
+	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+
+	/* Wait for completion of the requested commands */
+	if (wait_for_comp)
+		__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+
+	/* Set the mac type of addresses we want to clear */
+	__set_bit(mac_type, &vlan_mac_flags);
+
+	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
+	if (rc < 0)
+		BNX2X_ERR("Failed to delete MACs: %d\n", rc);
+
+	return rc;
+}
+
+int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
+{
+	unsigned long ramrod_flags = 0;
+
+	DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	/* Eth MAC is set on RSS leading client (fp[0]) */
+	return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set,
+				 BNX2X_ETH_MAC, &ramrod_flags);
+}
+
+int bnx2x_setup_leading(struct bnx2x *bp)
+{
+	return bnx2x_setup_queue(bp, &bp->fp[0], 1);
+}
+
+/**
+ * bnx2x_set_int_mode - configure interrupt mode
+ *
+ * @bp:		driver handle
+ *
+ * In case of MSI-X it will also try to enable MSI-X.
+ */
+static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
+{
+	switch (int_mode) {
+	case INT_MODE_MSI:
+		bnx2x_enable_msi(bp);
+		/* falling through... */
+	case INT_MODE_INTx:
+		bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+		break;
+	default:
+		/* Set number of queues according to bp->multi_mode value */
+		bnx2x_set_num_queues(bp);
+
+		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
+		   bp->num_queues);
+
+		/* if we can't use MSI-X we only need one fp,
+		 * so try to enable MSI-X with the requested number of fp's
+		 * and fall back to MSI or legacy INTx with one fp
+		 */
+		if (bnx2x_enable_msix(bp)) {
+			/* failed to enable MSI-X */
+			if (bp->multi_mode)
+				DP(NETIF_MSG_IFUP,
+					  "Multi requested but failed to "
+					  "enable MSI-X (%d), "
+					  "set number of queues to %d\n",
+				   bp->num_queues,
+				   1 + NON_ETH_CONTEXT_USE);
+			bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
+
+			/* Try to enable MSI */
+			if (!(bp->flags & DISABLE_MSI_FLAG))
+				bnx2x_enable_msi(bp);
+		}
+		break;
+	}
+}
+
+/* must be called prior to any HW initializations */
+static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
+{
+	return L2_ILT_LINES(bp);
+}
+
+void bnx2x_ilt_set_info(struct bnx2x *bp)
+{
+	struct ilt_client_info *ilt_client;
+	struct bnx2x_ilt *ilt = BP_ILT(bp);
+	u16 line = 0;
+
+	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
+	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
+
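+	/* In the debug prints below, 'hw psz' is the page size encoded as
+	 * log2(page_size / 4K), presumably the encoding the HW expects.
+	 */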
+	/* CDU */
+	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
+	ilt_client->client_num = ILT_CLIENT_CDU;
+	ilt_client->page_size = CDU_ILT_PAGE_SZ;
+	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
+	ilt_client->start = line;
+	line += bnx2x_cid_ilt_lines(bp);
+#ifdef BCM_CNIC
+	line += CNIC_ILT_LINES;
+#endif
+	ilt_client->end = line - 1;
+
+	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
+					 "flags 0x%x, hw psz %d\n",
+	   ilt_client->start,
+	   ilt_client->end,
+	   ilt_client->page_size,
+	   ilt_client->flags,
+	   ilog2(ilt_client->page_size >> 12));
+
+	/* QM */
+	if (QM_INIT(bp->qm_cid_count)) {
+		ilt_client = &ilt->clients[ILT_CLIENT_QM];
+		ilt_client->client_num = ILT_CLIENT_QM;
+		ilt_client->page_size = QM_ILT_PAGE_SZ;
+		ilt_client->flags = 0;
+		ilt_client->start = line;
+
+		/* 4 bytes for each cid */
+		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
+							 QM_ILT_PAGE_SZ);
+
+		ilt_client->end = line - 1;
+
+		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
+						 "flags 0x%x, hw psz %d\n",
+		   ilt_client->start,
+		   ilt_client->end,
+		   ilt_client->page_size,
+		   ilt_client->flags,
+		   ilog2(ilt_client->page_size >> 12));
+
+	}
+	/* SRC */
+	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
+#ifdef BCM_CNIC
+	ilt_client->client_num = ILT_CLIENT_SRC;
+	ilt_client->page_size = SRC_ILT_PAGE_SZ;
+	ilt_client->flags = 0;
+	ilt_client->start = line;
+	line += SRC_ILT_LINES;
+	ilt_client->end = line - 1;
+
+	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
+					 "flags 0x%x, hw psz %d\n",
+	   ilt_client->start,
+	   ilt_client->end,
+	   ilt_client->page_size,
+	   ilt_client->flags,
+	   ilog2(ilt_client->page_size >> 12));
+
+#else
+	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+
+	/* TM */
+	ilt_client = &ilt->clients[ILT_CLIENT_TM];
+#ifdef BCM_CNIC
+	ilt_client->client_num = ILT_CLIENT_TM;
+	ilt_client->page_size = TM_ILT_PAGE_SZ;
+	ilt_client->flags = 0;
+	ilt_client->start = line;
+	line += TM_ILT_LINES;
+	ilt_client->end = line - 1;
+
+	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
+					 "flags 0x%x, hw psz %d\n",
+	   ilt_client->start,
+	   ilt_client->end,
+	   ilt_client->page_size,
+	   ilt_client->flags,
+	   ilog2(ilt_client->page_size >> 12));
+
+#else
+	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
+#endif
+	BUG_ON(line > ILT_MAX_LINES);
+}
+
+/**
+ * bnx2x_pf_q_prep_init - prepare INIT transition parameters
+ *
+ * @bp:			driver handle
+ * @fp:			pointer to fastpath
+ * @init_params:	pointer to parameters structure
+ *
+ * parameters configured:
+ *      - HC configuration
+ *      - Queue's CDU context
+ */
+static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
+	struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
+{
+	u8 cos;
+	/* FCoE Queue uses Default SB, thus has no HC capabilities */
+	if (!IS_FCOE_FP(fp)) {
+		__set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
+		__set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
+
+		/* If HC is supported, enable host coalescing in the
+		 * transition to INIT state.
+		 */
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
+		__set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
+
+		/* HC rate */
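+		/* (assuming rx_ticks/tx_ticks are coalescing intervals in
+		 * usec, 1000000/ticks yields an update rate per second)
+		 */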
+		init_params->rx.hc_rate = bp->rx_ticks ?
+			(1000000 / bp->rx_ticks) : 0;
+		init_params->tx.hc_rate = bp->tx_ticks ?
+			(1000000 / bp->tx_ticks) : 0;
+
+		/* FW SB ID */
+		init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
+			fp->fw_sb_id;
+
+		/*
+		 * CQ index among the SB indices: FCoE clients use the default
+		 * SB, therefore it's different.
+		 */
+		init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
+		init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
+	}
+
+	/* set maximum number of COSs supported by this queue */
+	init_params->max_cos = fp->max_cos;
+
+	DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d",
+	    fp->index, init_params->max_cos);
+
+	/* set the context pointers queue object */
+	for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++)
+		init_params->cxts[cos] =
+			&bp->context.vcxt[fp->txdata[cos].cid].eth;
+}
+
+int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+			struct bnx2x_queue_state_params *q_params,
+			struct bnx2x_queue_setup_tx_only_params *tx_only_params,
+			int tx_index, bool leading)
+{
+	memset(tx_only_params, 0, sizeof(*tx_only_params));
+
+	/* Set the command */
+	q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
+
+	/* Set tx-only QUEUE flags: don't zero statistics */
+	tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
+
+	/* choose the index of the cid to send the slow path on */
+	tx_only_params->cid_index = tx_index;
+
+	/* Set general TX_ONLY_SETUP parameters */
+	bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
+
+	/* Set Tx TX_ONLY_SETUP parameters */
+	bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
+
+	DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
+			 "cos %d, primary cid %d, cid %d, "
+			 "client id %d, sp-client id %d, flags %lx",
+	   tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
+	   q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
+	   tx_only_params->gen_params.spcl_id, tx_only_params->flags);
+
+	/* send the ramrod */
+	return bnx2x_queue_state_change(bp, q_params);
+}
+
+
+/**
+ * bnx2x_setup_queue - setup queue
+ *
+ * @bp:		driver handle
+ * @fp:		pointer to fastpath
+ * @leading:	is leading
+ *
+ * This function performs 2 steps in a Queue state machine
+ *      actually: 1) RESET->INIT 2) INIT->SETUP
+ */
+
+int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+		       bool leading)
+{
+	struct bnx2x_queue_state_params q_params = {0};
+	struct bnx2x_queue_setup_params *setup_params =
+						&q_params.params.setup;
+	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+						&q_params.params.tx_only;
+	int rc;
+	u8 tx_index;
+
+	DP(BNX2X_MSG_SP, "setting up queue %d", fp->index);
+
+	/* reset IGU state; skip the FCoE L2 queue */
+	if (!IS_FCOE_FP(fp))
+		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
+			     IGU_INT_ENABLE, 0);
+
+	q_params.q_obj = &fp->q_obj;
+	/* We want to wait for completion in this context */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+	/* Prepare the INIT parameters */
+	bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
+
+	/* Set the command */
+	q_params.cmd = BNX2X_Q_CMD_INIT;
+
+	/* Change the state to INIT */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
+		return rc;
+	}
+
+	DP(BNX2X_MSG_SP, "init complete");
+
+
+	/* Now move the Queue to the SETUP state... */
+	memset(setup_params, 0, sizeof(*setup_params));
+
+	/* Set QUEUE flags */
+	setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
+
+	/* Set general SETUP parameters */
+	bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
+				FIRST_TX_COS_INDEX);
+
+	bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
+			    &setup_params->rxq_params);
+
+	bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
+			   FIRST_TX_COS_INDEX);
+
+	/* Set the command */
+	q_params.cmd = BNX2X_Q_CMD_SETUP;
+
+	/* Change the state to SETUP */
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc) {
+		BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
+		return rc;
+	}
+
+	/* loop through the relevant tx-only indices */
+	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+	      tx_index < fp->max_cos;
+	      tx_index++) {
+
+		/* prepare and send tx-only ramrod*/
+		rc = bnx2x_setup_tx_only(bp, fp, &q_params,
+					  tx_only_params, tx_index, leading);
+		if (rc) {
+			BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
+				  fp->index, tx_index);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int bnx2x_stop_queue(struct bnx2x *bp, int index)
+{
+	struct bnx2x_fastpath *fp = &bp->fp[index];
+	struct bnx2x_fp_txdata *txdata;
+	struct bnx2x_queue_state_params q_params = {0};
+	int rc, tx_index;
+
+	DP(BNX2X_MSG_SP, "stopping queue %d cid %d", index, fp->cid);
+
+	q_params.q_obj = &fp->q_obj;
+	/* We want to wait for completion in this context */
+	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+
+
+	/* close tx-only connections */
+	for (tx_index = FIRST_TX_ONLY_COS_INDEX;
+	     tx_index < fp->max_cos;
+	     tx_index++) {
+
+		/* ascertain this is a normal queue */
+		txdata = &fp->txdata[tx_index];
+
+		DP(BNX2X_MSG_SP, "stopping tx-only queue %d",
+							txdata->txq_index);
+
+		/* send the terminate ramrod on the tx-only connection */
+		q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+		memset(&q_params.params.terminate, 0,
+		       sizeof(q_params.params.terminate));
+		q_params.params.terminate.cid_index = tx_index;
+
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc)
+			return rc;
+
+		/* send the CFC delete ramrod on the tx-only connection */
+		q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+		memset(&q_params.params.cfc_del, 0,
+		       sizeof(q_params.params.cfc_del));
+		q_params.params.cfc_del.cid_index = tx_index;
+		rc = bnx2x_queue_state_change(bp, &q_params);
+		if (rc)
+			return rc;
+	}
+	/* Stop the primary connection: */
+	/* ...halt the connection */
+	q_params.cmd = BNX2X_Q_CMD_HALT;
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc)
+		return rc;
+
+	/* ...terminate the connection */
+	q_params.cmd = BNX2X_Q_CMD_TERMINATE;
+	memset(&q_params.params.terminate, 0,
+	       sizeof(q_params.params.terminate));
+	q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
+	rc = bnx2x_queue_state_change(bp, &q_params);
+	if (rc)
+		return rc;
+	/* ...delete cfc entry */
+	q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
+	memset(&q_params.params.cfc_del, 0,
+	       sizeof(q_params.params.cfc_del));
+	q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
+	return bnx2x_queue_state_change(bp, &q_params);
+}
+
+
+static void bnx2x_reset_func(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_FUNC(bp);
+	int i;
+
+	/* Disable the function in the FW */
+	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
+	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
+	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
+	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
+
+	/* FP SBs */
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		REG_WR8(bp, BAR_CSTRORM_INTMEM +
+			   CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
+			   SB_DISABLED);
+	}
+
+#ifdef BCM_CNIC
+	/* CNIC SB */
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+		CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(bnx2x_cnic_fw_sb_id(bp)),
+		SB_DISABLED);
+#endif
+	/* SP SB */
+	REG_WR8(bp, BAR_CSTRORM_INTMEM +
+		   CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
+		   SB_DISABLED);
+
+	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
+		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
+		       0);
+
+	/* Configure IGU */
+	if (bp->common.int_block == INT_BLOCK_HC) {
+		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
+		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
+	} else {
+		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
+		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
+	}
+
+#ifdef BCM_CNIC
+	/* Disable Timer scan */
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
+	/*
+	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
+	 * complete
+	 */
+	for (i = 0; i < 200; i++) {
+		msleep(10);
+		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
+			break;
+	}
+#endif
+	/* Clear ILT */
+	bnx2x_clear_func_ilt(bp, func);
+
+	/* Timers bug workaround for E2: if this is vnic-3,
+	 * we need to set the entire ILT range for these timers.
+	 */
+	if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
+		struct ilt_client_info ilt_cli;
+		/* use dummy TM client */
+		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
+		ilt_cli.start = 0;
+		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
+		ilt_cli.client_num = ILT_CLIENT_TM;
+
+		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
+	}
+
+	/* this assumes that reset_port() is called before reset_func() */
+	if (!CHIP_IS_E1x(bp))
+		bnx2x_pf_disable(bp);
+
+	bp->dmae_ready = 0;
+}
+
+static void bnx2x_reset_port(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 val;
+
+	/* Reset physical Link */
+	bnx2x__link_reset(bp);
+
+	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
+
+	/* Do not rcv packets to BRB */
+	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
+	/* Do not direct rcv packets that are not for MCP to the BRB */
+	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+
+	/* Configure AEU */
+	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
+
+	msleep(100);
+	/* Check for BRB port occupancy */
+	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
+	if (val)
+		DP(NETIF_MSG_IFDOWN,
+		   "BRB1 is not empty, %d blocks are occupied\n", val);
+
+	/* TODO: Close Doorbell port? */
+}
+
+static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
+{
+	struct bnx2x_func_state_params func_params = {0};
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_HW_RESET;
+
+	func_params.params.hw_init.load_phase = load_code;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+static inline int bnx2x_func_stop(struct bnx2x *bp)
+{
+	struct bnx2x_func_state_params func_params = {0};
+	int rc;
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_STOP;
+
+	/*
+	 * Try to stop the function the 'good way'. If it fails (in case
+	 * of a parity error during bnx2x_chip_cleanup()) and we are
+	 * not in a debug mode, perform a state transaction in order to
+	 * enable a further HW_RESET transaction.
+	 */
+	rc = bnx2x_func_state_change(bp, &func_params);
+	if (rc) {
+#ifdef BNX2X_STOP_ON_ERROR
+		return rc;
+#else
+		BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
+			  "transaction\n");
+		__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
+		return bnx2x_func_state_change(bp, &func_params);
+#endif
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_send_unload_req - request unload mode from the MCP.
+ *
+ * @bp:			driver handle
+ * @unload_mode:	requested function's unload mode
+ *
+ * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
+ */
+u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
+{
+	u32 reset_code = 0;
+	int port = BP_PORT(bp);
+
+	/* Select the UNLOAD request mode */
+	if (unload_mode == UNLOAD_NORMAL)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	else if (bp->flags & NO_WOL_FLAG)
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
+
+	else if (bp->wol) {
+		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
+		u8 *mac_addr = bp->dev->dev_addr;
+		u32 val;
+		/* The mac address is written to entries 1-4 to
+		   preserve entry 0 which is used by the PMF */
+		u8 entry = (BP_E1HVN(bp) + 1)*8;
+
+		val = (mac_addr[0] << 8) | mac_addr[1];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
+
+		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+		      (mac_addr[4] << 8) | mac_addr[5];
+		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
+
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
+
+	} else
+		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+	/* Send the request to the MCP */
+	if (!BP_NOMCP(bp))
+		reset_code = bnx2x_fw_command(bp, reset_code, 0);
+	else {
+		int path = BP_PATH(bp);
+
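+		/*
+		 * Mimic the MCP decision: load_count[path][0] counts all
+		 * functions loaded on this path and load_count[path][1 + port]
+		 * those on this port, so the last function down gets COMMON,
+		 * the last one on its port gets PORT and everyone else gets
+		 * FUNCTION.
+		 */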
+		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d]      "
+				     "%d, %d, %d\n",
+		   path, load_count[path][0], load_count[path][1],
+		   load_count[path][2]);
+		load_count[path][0]--;
+		load_count[path][1 + port]--;
+		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d]  "
+				     "%d, %d, %d\n",
+		   path, load_count[path][0], load_count[path][1],
+		   load_count[path][2]);
+		if (load_count[path][0] == 0)
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
+		else if (load_count[path][1 + port] == 0)
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
+		else
+			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
+	}
+
+	return reset_code;
+}
+
+/**
+ * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
+ *
+ * @bp:		driver handle
+ */
+void bnx2x_send_unload_done(struct bnx2x *bp)
+{
+	/* Report UNLOAD_DONE to MCP */
+	if (!BP_NOMCP(bp))
+		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+}
+
+static inline int bnx2x_func_wait_started(struct bnx2x *bp)
+{
+	int tout = 50;
+	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
+
+	if (!bp->port.pmf)
+		return 0;
+
+	/*
+	 * (assumption: no attention from the MCP at this stage)
+	 * The PMF is probably in the middle of a TX disable/enable
+	 * transaction, so:
+	 * 1. Sync the ISR for the default SB
+	 * 2. Sync the SP queue - this guarantees that attention handling
+	 *    has started
+	 * 3. Wait until the TX disable/enable transaction completes
+	 *
+	 * Steps 1+2 guarantee that if a DCBx attention was scheduled, it has
+	 * already changed the pending bit of the transaction from STARTED to
+	 * TX_STOPPED; if we have already received the completion for the
+	 * transaction, the state is TX_STOPPED. The state will return to
+	 * STARTED after the TX_STOPPED-->STARTED transaction completes.
+	 */
+
+	/* make sure default SB ISR is done */
+	if (msix)
+		synchronize_irq(bp->msix_table[0].vector);
+	else
+		synchronize_irq(bp->pdev->irq);
+
+	flush_workqueue(bnx2x_wq);
+
+	while (bnx2x_func_get_state(bp, &bp->func_obj) !=
+				BNX2X_F_STATE_STARTED && tout--)
+		msleep(20);
+
+	if (bnx2x_func_get_state(bp, &bp->func_obj) !=
+						BNX2X_F_STATE_STARTED) {
+#ifdef BNX2X_STOP_ON_ERROR
+		return -EBUSY;
+#else
+		/*
+		 * Failed to complete the transaction in a "good way",
+		 * so force both transactions with the CLR bit.
+		 */
+		struct bnx2x_func_state_params func_params = {0};
+
+		DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
+			  "Forcing STARTED-->TX_STOPPED-->STARTED\n");
+
+		func_params.f_obj = &bp->func_obj;
+		__set_bit(RAMROD_DRV_CLR_ONLY,
+					&func_params.ramrod_flags);
+
+		/* STARTED-->TX_STOPPED */
+		func_params.cmd = BNX2X_F_CMD_TX_STOP;
+		bnx2x_func_state_change(bp, &func_params);
+
+		/* TX_STOPPED-->STARTED */
+		func_params.cmd = BNX2X_F_CMD_TX_START;
+		return bnx2x_func_state_change(bp, &func_params);
+#endif
+	}
+
+	return 0;
+}
+
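+/*
+ * Graceful unload: drain the tx queues, delete the configured MACs and the
+ * multicast configuration, switch Rx to "drop all", negotiate the unload
+ * mode with the MCP, close all connections, stop the function and finally
+ * reset the HW according to the reset code the MCP returned.
+ */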
+void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
+{
+	int port = BP_PORT(bp);
+	int i, rc = 0;
+	u8 cos;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	u32 reset_code;
+
+	/* Wait until tx fastpath tasks complete */
+	for_each_tx_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		for_each_cos_in_tx_queue(fp, cos)
+			rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]);
+#ifdef BNX2X_STOP_ON_ERROR
+		if (rc)
+			return;
+#endif
+	}
+
+	/* Give HW time to discard old tx messages */
+	usleep_range(1000, 1000);
+
+	/* Clean all ETH MACs */
+	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false);
+	if (rc < 0)
+		BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
+
+	/* Clean up UC list  */
+	rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
+				true);
+	if (rc < 0)
+		BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
+			  "%d\n", rc);
+
+	/* Disable LLH */
+	if (!CHIP_IS_E1(bp))
+		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
+
+	/* Set "drop all" (stop Rx).
+	 * We need to take a netif_addr_lock() here in order to prevent
+	 * a race between the completion code and this code.
+	 */
+	netif_addr_lock_bh(bp->dev);
+	/* Schedule the rx_mode command */
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+	else
+		bnx2x_set_storm_rx_mode(bp);
+
+	/* Cleanup multicast configuration */
+	rparam.mcast_obj = &bp->mcast_obj;
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0)
+		BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
+
+	netif_addr_unlock_bh(bp->dev);
+
+	/*
+	 * Send the UNLOAD_REQUEST to the MCP. This will tell us whether
+	 * this function should perform a FUNC, PORT or COMMON HW
+	 * reset.
+	 */
+	reset_code = bnx2x_send_unload_req(bp, unload_mode);
+
+	/*
+	 * (assumption: No Attention from MCP at this stage)
+	 * PMF probably in the middle of TXdisable/enable transaction
+	 */
+	rc = bnx2x_func_wait_started(bp);
+	if (rc) {
+		BNX2X_ERR("bnx2x_func_wait_started failed\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		return;
+#endif
+	}
+
+	/* Close multi and leading connections
+	 * Completions for ramrods are collected in a synchronous way
+	 */
+	for_each_queue(bp, i)
+		if (bnx2x_stop_queue(bp, i))
+#ifdef BNX2X_STOP_ON_ERROR
+			return;
+#else
+			goto unload_error;
+#endif
+	/* If the SP settings didn't get completed so far, something
+	 * very wrong has happened.
+	 */
+	if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
+		BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
+
+#ifndef BNX2X_STOP_ON_ERROR
+unload_error:
+#endif
+	rc = bnx2x_func_stop(bp);
+	if (rc) {
+		BNX2X_ERR("Function stop failed!\n");
+#ifdef BNX2X_STOP_ON_ERROR
+		return;
+#endif
+	}
+
+	/* Disable HW interrupts, NAPI */
+	bnx2x_netif_stop(bp, 1);
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+
+	/* Reset the chip */
+	rc = bnx2x_reset_hw(bp, reset_code);
+	if (rc)
+		BNX2X_ERR("HW_RESET failed\n");
+
+	/* Report UNLOAD_DONE to MCP */
+	bnx2x_send_unload_done(bp);
+}
+
+void bnx2x_disable_close_the_gate(struct bnx2x *bp)
+{
+	u32 val;
+
+	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+
+	if (CHIP_IS_E1(bp)) {
+		int port = BP_PORT(bp);
+		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+			MISC_REG_AEU_MASK_ATTN_FUNC_0;
+
+		val = REG_RD(bp, addr);
+		val &= ~(0x300);
+		REG_WR(bp, addr, val);
+	} else {
+		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
+		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
+			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
+		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
+	}
+}
+
+/* Close gates #2, #3 and #4: */
+static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
+{
+	u32 val;
+
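+	/*
+	 * Gate #4 discards doorbells, gate #2 discards internal (host)
+	 * writes and gate #3 blocks interrupts from the HC or IGU, per the
+	 * register writes below.
+	 */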
+	/* Gates #2 and #4a are closed/opened for "not E1" only */
+	if (!CHIP_IS_E1(bp)) {
+		/* #4 */
+		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
+		/* #2 */
+		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
+	}
+
+	/* #3 */
+	if (CHIP_IS_E1x(bp)) {
+		/* Prevent interrupts from HC on both ports */
+		val = REG_RD(bp, HC_REG_CONFIG_1);
+		REG_WR(bp, HC_REG_CONFIG_1,
+		       (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
+		       (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
+
+		val = REG_RD(bp, HC_REG_CONFIG_0);
+		REG_WR(bp, HC_REG_CONFIG_0,
+		       (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
+		       (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
+	} else {
+		/* Prevent incoming interrupts in IGU */
+		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+		REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
+		       (!close) ?
+		       (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
+		       (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
+	}
+
+	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+		close ? "closing" : "opening");
+	mmiowb();
+}
+
+#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
+
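+/**
+ * bnx2x_clp_reset_prep - save and set the `magic' bit.
+ *
+ * @bp:		driver handle
+ * @magic_val:	old value of the `magic' bit is returned here
+ *
+ * Sets the `magic' bit in the CLP mailbox so that the MF configuration
+ * survives the MCP reset; the old value is saved for bnx2x_clp_reset_done().
+ */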
+static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
+{
+	/* Do some magic... */
+	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+	*magic_val = val & SHARED_MF_CLP_MAGIC;
+	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
+}
+
+/**
+ * bnx2x_clp_reset_done - restore the value of the `magic' bit.
+ *
+ * @bp:		driver handle
+ * @magic_val:	old value of the `magic' bit.
+ */
+static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
+{
+	/* Restore the `magic' bit value... */
+	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
+	MF_CFG_WR(bp, shared_mf_config.clp_mb,
+		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
+}
+
+/**
+ * bnx2x_reset_mcp_prep - prepare for MCP reset.
+ *
+ * @bp:		driver handle
+ * @magic_val:	old value of 'magic' bit.
+ *
+ * Takes care of CLP configurations.
+ */
+static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
+{
+	u32 shmem;
+	u32 validity_offset;
+
+	DP(NETIF_MSG_HW, "Starting\n");
+
+	/* Set `magic' bit in order to save MF config */
+	if (!CHIP_IS_E1(bp))
+		bnx2x_clp_reset_prep(bp, magic_val);
+
+	/* Get shmem offset */
+	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	validity_offset = offsetof(struct shmem_region, validity_map[0]);
+
+	/* Clear validity map flags */
+	if (shmem > 0)
+		REG_WR(bp, shmem + validity_offset, 0);
+}
+
+#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
+#define MCP_ONE_TIMEOUT  100    /* 100 ms */
+
+/**
+ * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
+ *
+ * @bp:	driver handle
+ */
+static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
+{
+	/* special handling for emulation and FPGA,
+	   wait 10 times longer */
+	if (CHIP_REV_IS_SLOW(bp))
+		msleep(MCP_ONE_TIMEOUT*10);
+	else
+		msleep(MCP_ONE_TIMEOUT);
+}
+
+/*
+ * initializes bp->common.shmem_base and waits for validity signature to appear
+ */
+static int bnx2x_init_shmem(struct bnx2x *bp)
+{
+	int cnt = 0;
+	u32 val = 0;
+
+	do {
+		bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+		if (bp->common.shmem_base) {
+			val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+			if (val & SHR_MEM_VALIDITY_MB)
+				return 0;
+		}
+
+		bnx2x_mcp_wait_one(bp);
+
+	} while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
+
+	BNX2X_ERR("BAD MCP validity signature\n");
+
+	return -ENODEV;
+}
+
+static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
+{
+	int rc = bnx2x_init_shmem(bp);
+
+	/* Restore the `magic' bit value */
+	if (!CHIP_IS_E1(bp))
+		bnx2x_clp_reset_done(bp, magic_val);
+
+	return rc;
+}
+
+static void bnx2x_pxp_prep(struct bnx2x *bp)
+{
+	if (!CHIP_IS_E1(bp)) {
+		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
+		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
+		mmiowb();
+	}
+}
+
+/*
+ * Reset the whole chip except for:
+ *      - PCIE core
+ *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
+ *              one reset bit)
+ *      - IGU
+ *      - MISC (including AEU)
+ *      - GRC
+ *      - RBCN, RBCP
+ */
+static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
+{
+	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
+	u32 global_bits2, stay_reset2;
+
+	/*
+	 * Bits that have to be set in reset_mask2 if we want to reset 'global'
+	 * (per chip) blocks.
+	 */
+	global_bits2 =
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
+
+	/* Don't reset the following blocks */
+	not_reset_mask1 =
+		MISC_REGISTERS_RESET_REG_1_RST_HC |
+		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
+		MISC_REGISTERS_RESET_REG_1_RST_PXP;
+
+	not_reset_mask2 =
+		MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
+		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
+		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
+		MISC_REGISTERS_RESET_REG_2_RST_ATC |
+		MISC_REGISTERS_RESET_REG_2_PGLC;
+
+	/*
+	 * Keep the following blocks in reset:
+	 *  - all xxMACs are handled by the bnx2x_link code.
+	 */
+	stay_reset2 =
+		MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
+		MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
+		MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
+		MISC_REGISTERS_RESET_REG_2_UMAC0 |
+		MISC_REGISTERS_RESET_REG_2_UMAC1 |
+		MISC_REGISTERS_RESET_REG_2_XMAC |
+		MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
+
+	/* Full reset masks according to the chip */
+	reset_mask1 = 0xffffffff;
+
+	if (CHIP_IS_E1(bp))
+		reset_mask2 = 0xffff;
+	else if (CHIP_IS_E1H(bp))
+		reset_mask2 = 0x1ffff;
+	else if (CHIP_IS_E2(bp))
+		reset_mask2 = 0xfffff;
+	else /* CHIP_IS_E3 */
+		reset_mask2 = 0x3ffffff;
+
+	/* Don't reset global blocks unless we need to */
+	if (!global)
+		reset_mask2 &= ~global_bits2;
+
+	/*
+	 * In case of attention in the QM, we need to reset PXP
+	 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
+	 * because otherwise QM reset would release 'close the gates' shortly
+	 * before resetting the PXP, then the PSWRQ would send a write
+	 * request to PGLUE. Then when PXP is reset, PGLUE would try to
+	 * read the payload data from PSWWR, but PSWWR would not
+	 * respond. The write queue in PGLUE would stuck, dmae commands
+	 * would not return. Therefore it's important to reset the second
+	 * reset register (containing the
+	 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
+	 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
+	 * bit).
+	 */
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+	       reset_mask2 & (~not_reset_mask2));
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+	       reset_mask1 & (~not_reset_mask1));
+
+	barrier();
+	mmiowb();
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
+	       reset_mask2 & (~stay_reset2));
+
+	barrier();
+	mmiowb();
+
+	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
+	mmiowb();
+}
+
+/**
+ * bnx2x_er_poll_igu_vq - poll for the pending writes bit.
+ *
+ * @bp:	driver handle
+ *
+ * The bit should get cleared in no more than 1s. Returns 0 if
+ * the pending writes bit gets cleared.
+ */
+static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
+{
+	u32 cnt = 1000;
+	u32 pend_bits = 0;
+
+	do {
+		pend_bits  = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
+
+		if (pend_bits == 0)
+			break;
+
+		usleep_range(1000, 1000);
+	} while (cnt-- > 0);
+
+	/* cnt is unsigned, so test the observed pending bits instead */
+	if (pend_bits) {
+		BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
+			  pend_bits);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
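+/*
+ * "Process kill" recovery: wait for the Tetris buffer to drain, close gates
+ * #2-#4, make sure the IGU has no pending writes (57712 and newer), prepare
+ * the MCP and PXP for reset, reset the chip, bring the MCP back up and
+ * reopen the gates.
+ */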
+static int bnx2x_process_kill(struct bnx2x *bp, bool global)
+{
+	int cnt = 1000;
+	u32 val = 0;
+	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
+
+	/* Empty the Tetris buffer, wait for 1s */
+	do {
+		sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
+		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
+		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
+		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
+		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
+		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
+		    ((port_is_idle_0 & 0x1) == 0x1) &&
+		    ((port_is_idle_1 & 0x1) == 0x1) &&
+		    (pgl_exp_rom2 == 0xffffffff))
+			break;
+		usleep_range(1000, 1000);
+	} while (cnt-- > 0);
+
+	if (cnt <= 0) {
+		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there "
+			  "are still outstanding read requests after 1s!\n");
+		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
+			  " port_is_idle_0=0x%08x,"
+			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
+			  pgl_exp_rom2);
+		return -EAGAIN;
+	}
+
+	barrier();
+
+	/* Close gates #2, #3 and #4 */
+	bnx2x_set_234_gates(bp, true);
+
+	/* Poll for IGU VQs for 57712 and newer chips */
+	if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
+		return -EAGAIN;
+
+	/* TBD: Indicate that "process kill" is in progress to MCP */
+
+	/* Clear "unprepared" bit */
+	REG_WR(bp, MISC_REG_UNPREPARED, 0);
+	barrier();
+
+	/* Make sure all is written to the chip before the reset */
+	mmiowb();
+
+	/* Wait for 1ms to empty GLUE and PCI-E core queues,
+	 * PSWHST, GRC and PSWRD Tetris buffer.
+	 */
+	usleep_range(1000, 1000);
+
+	/* Prepare for chip reset: */
+	/* MCP */
+	if (global)
+		bnx2x_reset_mcp_prep(bp, &val);
+
+	/* PXP */
+	bnx2x_pxp_prep(bp);
+	barrier();
+
+	/* reset the chip */
+	bnx2x_process_kill_chip_reset(bp, global);
+	barrier();
+
+	/* Recover after reset: */
+	/* MCP */
+	if (global && bnx2x_reset_mcp_comp(bp, val))
+		return -EAGAIN;
+
+	/* TBD: Add resetting the NO_MCP mode DB here */
+
+	/* PXP */
+	bnx2x_pxp_prep(bp);
+
+	/* Open the gates #2, #3 and #4 */
+	bnx2x_set_234_gates(bp, false);
+
+	/* TBD: IGU/AEU preparation - bring back the AEU/IGU to a
+	 * reset state, re-enable attentions. */
+
+	return 0;
+}
+
+int bnx2x_leader_reset(struct bnx2x *bp)
+{
+	int rc = 0;
+	bool global = bnx2x_reset_is_global(bp);
+
+	/* Try to recover after the failure */
+	if (bnx2x_process_kill(bp, global)) {
+		netdev_err(bp->dev, "Something bad happened on engine %d! "
+				    "Aii!\n", BP_PATH(bp));
+		rc = -EAGAIN;
+		goto exit_leader_reset;
+	}
+
+	/*
+	 * Clear RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
+	 * state.
+	 */
+	bnx2x_set_reset_done(bp);
+	if (global)
+		bnx2x_clear_reset_global(bp);
+
+exit_leader_reset:
+	bp->is_leader = 0;
+	bnx2x_release_leader_lock(bp);
+	smp_mb();
+	return rc;
+}
+
+static inline void bnx2x_recovery_failed(struct bnx2x *bp)
+{
+	netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
+
+	/* Disconnect this device */
+	netif_device_detach(bp->dev);
+
+	/*
+	 * Block ifup for all functions on this engine until "process kill"
+	 * or power cycle.
+	 */
+	bnx2x_set_reset_in_progress(bp);
+
+	/* Shut down the power */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+	smp_mb();
+}
+
+/*
+ * Assumption: runs under rtnl lock. This together with the fact
+ * that it's called only from bnx2x_sp_rtnl() ensures that it
+ * will never be called when netif_running(bp->dev) is false.
+ */
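+/*
+ * Recovery is a simple state machine: BNX2X_RECOVERY_INIT tries to take the
+ * leader lock and unloads the NIC, BNX2X_RECOVERY_WAIT waits until all other
+ * functions are down (the leader then performs the actual reset) and reloads
+ * the NIC once the reset is done.
+ */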
+static void bnx2x_parity_recover(struct bnx2x *bp)
+{
+	bool global = false;
+
+	DP(NETIF_MSG_HW, "Handling parity\n");
+	while (1) {
+		switch (bp->recovery_state) {
+		case BNX2X_RECOVERY_INIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
+			bnx2x_chk_parity_attn(bp, &global, false);
+
+			/* Try to get a LEADER_LOCK HW lock */
+			if (bnx2x_trylock_leader_lock(bp)) {
+				bnx2x_set_reset_in_progress(bp);
+				/*
+				 * If there was a global attention, set the
+				 * global reset bit.
+				 */
+
+				if (global)
+					bnx2x_set_reset_global(bp);
+
+				bp->is_leader = 1;
+			}
+
+			/* Stop the driver */
+			/* If interface has been removed - break */
+			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
+				return;
+
+			bp->recovery_state = BNX2X_RECOVERY_WAIT;
+
+			/*
+			 * Reset MCP command sequence number and MCP mailbox
+			 * sequence as we are going to reset the MCP.
+			 */
+			if (global) {
+				bp->fw_seq = 0;
+				bp->fw_drv_pulse_wr_seq = 0;
+			}
+
+			/* Ensure "is_leader", MCP command sequence and
+			 * "recovery_state" update values are seen on other
+			 * CPUs.
+			 */
+			smp_mb();
+			break;
+
+		case BNX2X_RECOVERY_WAIT:
+			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
+			if (bp->is_leader) {
+				int other_engine = BP_PATH(bp) ? 0 : 1;
+				u32 other_load_counter =
+					bnx2x_get_load_cnt(bp, other_engine);
+				u32 load_counter =
+					bnx2x_get_load_cnt(bp, BP_PATH(bp));
+				global = bnx2x_reset_is_global(bp);
+
+				/*
+				 * In case of a parity in a global block, let
+				 * the first leader that performs a
+				 * leader_reset() reset the global blocks in
+				 * order to clear global attentions. Otherwise
+				 * the gates will remain closed for that
+				 * engine.
+				 */
+				if (load_counter ||
+				    (global && other_load_counter)) {
+					/* Wait until all other functions get
+					 * down.
+					 */
+					schedule_delayed_work(&bp->sp_rtnl_task,
+								HZ/10);
+					return;
+				} else {
+					/* If all other functions got down -
+					 * try to bring the chip back to
+					 * normal. In any case it's an exit
+					 * point for a leader.
+					 */
+					if (bnx2x_leader_reset(bp)) {
+						bnx2x_recovery_failed(bp);
+						return;
+					}
+
+					/* If we are here, it means that the
+					 * leader has succeeded and no longer
+					 * wants to be a leader. Try to
+					 * continue as a non-leader.
+					 */
+					break;
+				}
+			} else { /* non-leader */
+				if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
+					/* Try to get the LEADER_LOCK HW lock,
+					 * since a former leader may have been
+					 * unloaded by the user or released
+					 * leadership for some other reason.
+					 */
+					if (bnx2x_trylock_leader_lock(bp)) {
+						/* I'm a leader now! Restart a
+						 * switch case.
+						 */
+						bp->is_leader = 1;
+						break;
+					}
+
+					schedule_delayed_work(&bp->sp_rtnl_task,
+								HZ/10);
+					return;
+
+				} else {
+					/*
+					 * If there was a global attention, wait
+					 * for it to be cleared.
+					 */
+					if (bnx2x_reset_is_global(bp)) {
+						schedule_delayed_work(
+							&bp->sp_rtnl_task,
+							HZ/10);
+						return;
+					}
+
+					if (bnx2x_nic_load(bp, LOAD_NORMAL))
+						bnx2x_recovery_failed(bp);
+					else {
+						bp->recovery_state =
+							BNX2X_RECOVERY_DONE;
+						smp_mb();
+					}
+
+					return;
+				}
+			}
+		default:
+			return;
+		}
+	}
+}
+
+/* bnx2x_nic_unload() flushes the bnx2x_wq, so the reset task is
+ * scheduled on a generic queue in order to prevent a deadlock.
+ */
+static void bnx2x_sp_rtnl_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
+
+	rtnl_lock();
+
+	if (!netif_running(bp->dev))
+		goto sp_rtnl_exit;
+
+	/* if stop on error is defined no recovery flows should be executed */
+#ifdef BNX2X_STOP_ON_ERROR
+	BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
+		  "so reset not done to allow debug dump,\n"
+		  "you will need to reboot when done\n");
+	goto sp_rtnl_not_reset;
+#endif
+
+	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
+		/*
+		 * Clear all pending SP commands as we are going to reset the
+		 * function anyway.
+		 */
+		bp->sp_rtnl_state = 0;
+		smp_mb();
+
+		bnx2x_parity_recover(bp);
+
+		goto sp_rtnl_exit;
+	}
+
+	if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
+		/*
+		 * Clear all pending SP commands as we are going to reset the
+		 * function anyway.
+		 */
+		bp->sp_rtnl_state = 0;
+		smp_mb();
+
+		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+
+		goto sp_rtnl_exit;
+	}
+#ifdef BNX2X_STOP_ON_ERROR
+sp_rtnl_not_reset:
+#endif
+	if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
+		bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+
+sp_rtnl_exit:
+	rtnl_unlock();
+}
+
+/* end of nic load/unload */
+
+static void bnx2x_period_task(struct work_struct *work)
+{
+	struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
+
+	if (!netif_running(bp->dev))
+		goto period_task_exit;
+
+	if (CHIP_REV_IS_SLOW(bp)) {
+		BNX2X_ERR("period task called on emulation, ignoring\n");
+		goto period_task_exit;
+	}
+
+	bnx2x_acquire_phy_lock(bp);
+	/*
+	 * The barrier is needed to ensure the ordering between the writing to
+	 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
+	 * the reading here.
+	 */
+	smp_mb();
+	if (bp->port.pmf) {
+		bnx2x_period_func(&bp->link_params, &bp->link_vars);
+
+		/* Re-queue task in 1 sec */
+		queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
+	}
+
+	bnx2x_release_phy_lock(bp);
+period_task_exit:
+	return;
+}
+
+/*
+ * Init service functions
+ */
+
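+/*
+ * Each absolute function has its own PGL "pretend" register, located at a
+ * fixed stride from the function-0 register; writing a function number there
+ * makes subsequent GRC accesses act on behalf of that function.
+ */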
+static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
+{
+	u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
+	u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
+	return base + (BP_ABS_FUNC(bp)) * stride;
+}
+
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
+{
+	u32 reg = bnx2x_get_pretend_reg(bp);
+
+	/* Flush all outstanding writes */
+	mmiowb();
+
+	/* Pretend to be function 0 */
+	REG_WR(bp, reg, 0);
+	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */
+
+	/* From now we are in the "like-E1" mode */
+	bnx2x_int_disable(bp);
+
+	/* Flush all outstanding writes */
+	mmiowb();
+
+	/* Restore the original function */
+	REG_WR(bp, reg, BP_ABS_FUNC(bp));
+	REG_RD(bp, reg);
+}
+
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
+{
+	if (CHIP_IS_E1(bp))
+		bnx2x_int_disable(bp);
+	else
+		bnx2x_undi_int_disable_e1h(bp);
+}
+
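+/*
+ * Detect a UNDI (PXE) driver left active by the boot code - it sets the
+ * normal-bell CID offset to 0x7 - and if found, unload it on both ports via
+ * MCP commands, quiesce the input traffic and reset the device while
+ * preserving the NIG port-swap configuration.
+ */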
+static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
+{
+	u32 val;
+
+	/* Check if there is any driver already loaded */
+	val = REG_RD(bp, MISC_REG_UNPREPARED);
+	if (val == 0x1) {
+		/* Check if it is the UNDI driver; the UNDI driver
+		 * initializes the CID offset for the normal bell to 0x7
+		 */
+		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
+		if (val == 0x7) {
+			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+			/* save our pf_num */
+			int orig_pf_num = bp->pf_num;
+			int port;
+			u32 swap_en, swap_val, value;
+
+			/* clear the UNDI indication */
+			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+
+			BNX2X_DEV_INFO("UNDI is active! reset device\n");
+
+			/* try to unload UNDI on port 0 */
+			bp->pf_num = 0;
+			bp->fw_seq =
+			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
+			reset_code = bnx2x_fw_command(bp, reset_code, 0);
+
+			/* if UNDI is loaded on the other port */
+			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
+
+				/* send "DONE" for previous unload */
+				bnx2x_fw_command(bp,
+						 DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+				/* unload UNDI on port 1 */
+				bp->pf_num = 1;
+				bp->fw_seq =
+			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+					DRV_MSG_SEQ_NUMBER_MASK);
+				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
+
+				bnx2x_fw_command(bp, reset_code, 0);
+			}
+
+			/* now it's safe to release the lock */
+			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+
+			bnx2x_undi_int_disable(bp);
+			port = BP_PORT(bp);
+
+			/* close input traffic and wait for it */
+			/* Do not rcv packets to BRB */
+			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
+					   NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
+			/* Do not direct rcv packets that are not for MCP to
+			 * the BRB */
+			REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
+					   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
+			/* clear AEU */
+			REG_WR(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
+					   MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
+			msleep(10);
+
+			/* save NIG port swap info */
+			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
+			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
+			/* reset device */
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
+			       0xd3ffffff);
+
+			value = 0x1400;
+			if (CHIP_IS_E3(bp)) {
+				value |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
+				value |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
+			}
+
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
+			       value);
+
+			/* take the NIG out of reset and restore swap values */
+			REG_WR(bp,
+			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
+			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
+			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
+			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
+
+			/* send unload done to the MCP */
+			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+
+			/* restore our func and fw_seq */
+			bp->pf_num = orig_pf_num;
+			bp->fw_seq =
+			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
+				DRV_MSG_SEQ_NUMBER_MASK);
+		} else
+			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
+	}
+}
+
+static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2, val3, val4, id;
+	u16 pmc;
+
+	/* Get the chip revision id and number. */
+	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
+	val = REG_RD(bp, MISC_REG_CHIP_NUM);
+	id = ((val & 0xffff) << 16);
+	val = REG_RD(bp, MISC_REG_CHIP_REV);
+	id |= ((val & 0xf) << 12);
+	val = REG_RD(bp, MISC_REG_CHIP_METAL);
+	id |= ((val & 0xff) << 4);
+	val = REG_RD(bp, MISC_REG_BOND_ID);
+	id |= (val & 0xf);
+	bp->common.chip_id = id;
+
+	/* Set doorbell size */
+	bp->db_size = (1 << BNX2X_DB_SHIFT);
+
+	if (!CHIP_IS_E1x(bp)) {
+		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+		if ((val & 1) == 0)
+			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
+		else
+			val = (val >> 1) & 1;
+		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
+						       "2_PORT_MODE");
+		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
+						 CHIP_2_PORT_MODE;
+
+		if (CHIP_MODE_IS_4_PORT(bp))
+			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
+		else
+			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
+	} else {
+		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
+		bp->pfid = bp->pf_num;			/* 0..7 */
+	}
+
+	bp->link_params.chip_id = bp->common.chip_id;
+	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
+
+	val = (REG_RD(bp, 0x2874) & 0x55);
+	if ((bp->common.chip_id & 0x1) ||
+	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
+		bp->flags |= ONE_PORT_FLAG;
+		BNX2X_DEV_INFO("single port device\n");
+	}
+
+	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
+	bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
+				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
+	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
+		       bp->common.flash_size, bp->common.flash_size);
+
+	bnx2x_init_shmem(bp);
+
+	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
+					MISC_REG_GENERIC_CR_1 :
+					MISC_REG_GENERIC_CR_0));
+
+	bp->link_params.shmem_base = bp->common.shmem_base;
+	bp->link_params.shmem2_base = bp->common.shmem2_base;
+	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
+		       bp->common.shmem_base, bp->common.shmem2_base);
+
+	if (!bp->common.shmem_base) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		bp->flags |= NO_MCP_FLAG;
+		return;
+	}
+
+	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
+	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
+
+	bp->link_params.hw_led_mode = ((bp->common.hw_config &
+					SHARED_HW_CFG_LED_MODE_MASK) >>
+				       SHARED_HW_CFG_LED_MODE_SHIFT);
+
+	bp->link_params.feature_config_flags = 0;
+	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
+	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
+		bp->link_params.feature_config_flags |=
+				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+	else
+		bp->link_params.feature_config_flags &=
+				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
+
+	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
+	bp->common.bc_ver = val;
+	BNX2X_DEV_INFO("bc_ver %X\n", val);
+	if (val < BNX2X_BC_VER) {
+		/* for now only warn;
+		 * later we might need to enforce this */
+		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
+			  "please upgrade BC\n", BNX2X_BC_VER, val);
+	}
+	bp->link_params.feature_config_flags |=
+				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
+				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
+
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
+		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
+
+	bp->link_params.feature_config_flags |=
+		(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
+		FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+
+	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
+	bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
+
+	BNX2X_DEV_INFO("%sWoL capable\n",
+		       (bp->flags & NO_WOL_FLAG) ? "not " : "");
+
+	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
+	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
+	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
+	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
+
+	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
+		 val, val2, val3, val4);
+}
+
+#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
+#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
+
+static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
+{
+	int pfid = BP_FUNC(bp);
+	int vn = BP_E1HVN(bp);
+	int igu_sb_id;
+	u32 val;
+	u8 fid, igu_sb_cnt = 0;
+
+	bp->igu_base_sb = 0xff;
+	if (CHIP_INT_MODE_IS_BC(bp)) {
+		igu_sb_cnt = bp->igu_sb_cnt;
+		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
+			FP_SB_MAX_E1x;
+
+		bp->igu_dsb_id =  E1HVN_MAX * FP_SB_MAX_E1x +
+			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
+
+		return;
+	}
+
+	/* IGU in normal mode - read CAM */
+	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
+	     igu_sb_id++) {
+		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
+		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
+			continue;
+		fid = IGU_FID(val);
+		if ((fid & IGU_FID_ENCODE_IS_PF)) {
+			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
+				continue;
+			if (IGU_VEC(val) == 0)
+				/* default status block */
+				bp->igu_dsb_id = igu_sb_id;
+			else {
+				if (bp->igu_base_sb == 0xff)
+					bp->igu_base_sb = igu_sb_id;
+				igu_sb_cnt++;
+			}
+		}
+	}
+
+#ifdef CONFIG_PCI_MSI
+	/*
+	 * It's expected that the number of CAM entries for this function
+	 * equals the number evaluated based on the MSI-X table size. We
+	 * want a harsh warning if these values are different!
+	 */
+	WARN_ON(bp->igu_sb_cnt != igu_sb_cnt);
+#endif
+
+	if (igu_sb_cnt == 0)
+		BNX2X_ERR("CAM configuration error\n");
+}
+
+static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
+						    u32 switch_cfg)
+{
+	int cfg_size = 0, idx, port = BP_PORT(bp);
+
+	/* Aggregation of supported attributes of all external phys */
+	bp->port.supported[0] = 0;
+	bp->port.supported[1] = 0;
+	switch (bp->link_params.num_phys) {
+	case 1:
+		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
+		cfg_size = 1;
+		break;
+	case 2:
+		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
+		cfg_size = 1;
+		break;
+	case 3:
+		if (bp->link_params.multi_phy_config &
+		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
+			bp->port.supported[1] =
+				bp->link_params.phy[EXT_PHY1].supported;
+			bp->port.supported[0] =
+				bp->link_params.phy[EXT_PHY2].supported;
+		} else {
+			bp->port.supported[0] =
+				bp->link_params.phy[EXT_PHY1].supported;
+			bp->port.supported[1] =
+				bp->link_params.phy[EXT_PHY2].supported;
+		}
+		cfg_size = 2;
+		break;
+	}
+
+	if (!(bp->port.supported[0] || bp->port.supported[1])) {
+		BNX2X_ERR("NVRAM config error. BAD phy config. "
+			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
+			   SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].external_phy_config),
+			   SHMEM_RD(bp,
+			   dev_info.port_hw_config[port].external_phy_config2));
+		return;
+	}
+
+	if (CHIP_IS_E3(bp))
+		bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
+	else {
+		switch (switch_cfg) {
+		case SWITCH_CFG_1G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
+			break;
+		case SWITCH_CFG_10G:
+			bp->port.phy_addr = REG_RD(
+				bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
+			break;
+		default:
+			BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
+				  bp->port.link_config[0]);
+			return;
+		}
+	}
+	BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
+	/* mask what we support according to speed_cap_mask per configuration */
+	for (idx = 0; idx < cfg_size; idx++) {
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
+			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
+			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
+			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
+			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
+			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
+						     SUPPORTED_1000baseT_Full);
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
+			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
+
+		if (!(bp->link_params.speed_cap_mask[idx] &
+					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
+			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
+	}
+
+	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
+		       bp->port.supported[1]);
+}
+
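+/*
+ * Translate each NVRAM link_config word into the requested line speed,
+ * duplex and advertised modes, validating every choice against the
+ * supported[] mask built in bnx2x_link_settings_supported().
+ */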
+static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
+{
+	u32 link_config, idx, cfg_size = 0;
+	bp->port.advertising[0] = 0;
+	bp->port.advertising[1] = 0;
+	switch (bp->link_params.num_phys) {
+	case 1:
+	case 2:
+		cfg_size = 1;
+		break;
+	case 3:
+		cfg_size = 2;
+		break;
+	}
+	for (idx = 0; idx < cfg_size; idx++) {
+		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
+		link_config = bp->port.link_config[idx];
+		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
+		case PORT_FEATURE_LINK_SPEED_AUTO:
+			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_AUTO_NEG;
+				bp->port.advertising[idx] |=
+					bp->port.supported[idx];
+			} else {
+				/* force 10G, no AN */
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10000baseT_Full |
+					 ADVERTISED_FIBRE);
+				continue;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_10M_FULL:
+			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10baseT_Full |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. "
+					    "Invalid link_config 0x%x"
+					    "  speed_cap_mask 0x%x\n",
+					    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_10M_HALF:
+			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10;
+				bp->link_params.req_duplex[idx] =
+					DUPLEX_HALF;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10baseT_Half |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. "
+					    "Invalid link_config 0x%x"
+					    "  speed_cap_mask 0x%x\n",
+					    link_config,
+					  bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_100M_FULL:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_100baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_100;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_100baseT_Full |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. "
+					    "Invalid link_config 0x%x"
+					    "  speed_cap_mask 0x%x\n",
+					    link_config,
+					  bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_100M_HALF:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_100baseT_Half) {
+				bp->link_params.req_line_speed[idx] =
+								SPEED_100;
+				bp->link_params.req_duplex[idx] =
+								DUPLEX_HALF;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_100baseT_Half |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_1G:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_1000baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_1000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_1000baseT_Full |
+					 ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_2_5G:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_2500baseX_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_2500;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_2500baseX_Full |
+						ADVERTISED_TP);
+			} else {
+				BNX2X_ERR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+
+		case PORT_FEATURE_LINK_SPEED_10G_CX4:
+			if (bp->port.supported[idx] &
+			    SUPPORTED_10000baseT_Full) {
+				bp->link_params.req_line_speed[idx] =
+					SPEED_10000;
+				bp->port.advertising[idx] |=
+					(ADVERTISED_10000baseT_Full |
+						ADVERTISED_FIBRE);
+			} else {
+				BNX2X_ERR("NVRAM config error. "
+				    "Invalid link_config 0x%x"
+				    "  speed_cap_mask 0x%x\n",
+				    link_config,
+				    bp->link_params.speed_cap_mask[idx]);
+				return;
+			}
+			break;
+		case PORT_FEATURE_LINK_SPEED_20G:
+			bp->link_params.req_line_speed[idx] = SPEED_20000;
+			break;
+		default:
+			BNX2X_ERR("NVRAM config error. "
+				  "BAD link speed link_config 0x%x\n",
+				  link_config);
+			bp->link_params.req_line_speed[idx] =
+						SPEED_AUTO_NEG;
+			bp->port.advertising[idx] =
+					bp->port.supported[idx];
+			break;
+		}
+
+		bp->link_params.req_flow_ctrl[idx] = (link_config &
+					 PORT_FEATURE_FLOW_CONTROL_MASK);
+		if ((bp->link_params.req_flow_ctrl[idx] ==
+		     BNX2X_FLOW_CTRL_AUTO) &&
+		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
+			bp->link_params.req_flow_ctrl[idx] =
+				BNX2X_FLOW_CTRL_NONE;
+		}
+
+		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
+			       " 0x%x advertising 0x%x\n",
+			       bp->link_params.req_line_speed[idx],
+			       bp->link_params.req_duplex[idx],
+			       bp->link_params.req_flow_ctrl[idx],
+			       bp->port.advertising[idx]);
+	}
+}
+
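+/* Assemble a 6-byte MAC address from the shmem hi/lo words: the hi word
+ * carries bytes 0-1 and the lo dword bytes 2-5, both in big-endian order.
+ */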
+static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
+{
+	mac_hi = cpu_to_be16(mac_hi);
+	mac_lo = cpu_to_be32(mac_lo);
+	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
+	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
+}
+
+static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	u32 config;
+	u32 ext_phy_type, ext_phy_config;
+
+	bp->link_params.bp = bp;
+	bp->link_params.port = port;
+
+	bp->link_params.lane_config =
+		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
+
+	bp->link_params.speed_cap_mask[0] =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].speed_capability_mask);
+	bp->link_params.speed_cap_mask[1] =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].speed_capability_mask2);
+	bp->port.link_config[0] =
+		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
+
+	bp->port.link_config[1] =
+		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
+
+	bp->link_params.multi_phy_config =
+		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
+	/* If the device is capable of WoL, set the default state according
+	 * to the HW
+	 */
+	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
+	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
+		   (config & PORT_FEATURE_WOL_ENABLED));
+
+	BNX2X_DEV_INFO("lane_config 0x%08x  "
+		       "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
+		       bp->link_params.lane_config,
+		       bp->link_params.speed_cap_mask[0],
+		       bp->port.link_config[0]);
+
+	bp->link_params.switch_cfg = (bp->port.link_config[0] &
+				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
+	bnx2x_phy_probe(&bp->link_params);
+	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
+
+	bnx2x_link_settings_requested(bp);
+
+	/*
+	 * If connected directly, work with the internal PHY, otherwise, work
+	 * with the external PHY
+	 */
+	ext_phy_config =
+		SHMEM_RD(bp,
+			 dev_info.port_hw_config[port].external_phy_config);
+	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
+	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
+		bp->mdio.prtad = bp->port.phy_addr;
+
+	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
+		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+		bp->mdio.prtad =
+			XGXS_EXT_PHY_ADDR(ext_phy_config);
+
+	/*
+	 * Check if a hw lock is required to access the MDC/MDIO bus to the
+	 * PHY(s). In MF mode, it is set to cover the self-test cases.
+	 */
+	if (IS_MF(bp))
+		bp->port.need_hw_lock = 1;
+	else
+		bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
+							bp->common.shmem_base,
+							bp->common.shmem2_base);
+}
+
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int func = BP_ABS_FUNC(bp);
+
+	u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[port].max_iscsi_conn);
+	u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+				drv_lic_key[port].max_fcoe_conn);
+
+	/* Get the number of maximum allowed iSCSI and FCoE connections */
+	bp->cnic_eth_dev.max_iscsi_conn =
+		(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
+		BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+
+	bp->cnic_eth_dev.max_fcoe_conn =
+		(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
+		BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
+
+	/* Read the WWN: */
+	if (!IS_MF(bp)) {
+		/* Port info */
+		bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+			SHMEM_RD(bp,
+				dev_info.port_hw_config[port].
+				 fcoe_wwn_port_name_upper);
+		bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+			SHMEM_RD(bp,
+				dev_info.port_hw_config[port].
+				 fcoe_wwn_port_name_lower);
+
+		/* Node info */
+		bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+			SHMEM_RD(bp,
+				dev_info.port_hw_config[port].
+				 fcoe_wwn_node_name_upper);
+		bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+			SHMEM_RD(bp,
+				dev_info.port_hw_config[port].
+				 fcoe_wwn_node_name_lower);
+	} else if (!IS_MF_SD(bp)) {
+		u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+
+		/*
+		 * Read the WWN info only if the FCoE feature is enabled for
+		 * this function.
+		 */
+		if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+			/* Port info */
+			bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+				MF_CFG_RD(bp, func_ext_config[func].
+						fcoe_wwn_port_name_upper);
+			bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+				MF_CFG_RD(bp, func_ext_config[func].
+						fcoe_wwn_port_name_lower);
+
+			/* Node info */
+			bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+				MF_CFG_RD(bp, func_ext_config[func].
+						fcoe_wwn_node_name_upper);
+			bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+				MF_CFG_RD(bp, func_ext_config[func].
+						fcoe_wwn_node_name_lower);
+		}
+	}
+
+	BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
+		       bp->cnic_eth_dev.max_iscsi_conn,
+		       bp->cnic_eth_dev.max_fcoe_conn);
+
+	/*
+	 * If the maximum allowed number of connections is zero,
+	 * disable the feature.
+	 */
+	if (!bp->cnic_eth_dev.max_iscsi_conn)
+		bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+	if (!bp->cnic_eth_dev.max_fcoe_conn)
+		bp->flags |= NO_FCOE_FLAG;
+}
+#endif
+
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2;
+	int func = BP_ABS_FUNC(bp);
+	int port = BP_PORT(bp);
+#ifdef BCM_CNIC
+	u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
+	u8 *fip_mac = bp->fip_mac;
+#endif
+
+	/* Zero primary MAC configuration */
+	memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERROR("warning: random MAC workaround active\n");
+		random_ether_addr(bp->dev->dev_addr);
+	} else if (IS_MF(bp)) {
+		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
+		 * MAC is missing then the appropriate feature should be
+		 * disabled.
+		 */
+		if (IS_MF_SI(bp)) {
+			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						     iscsi_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						    iscsi_mac_addr_lower);
+				bnx2x_set_mac_buf(iscsi_mac, val, val2);
+				BNX2X_DEV_INFO("Read iSCSI MAC: "
+					       BNX2X_MAC_FMT"\n",
+					       BNX2X_MAC_PRN_LIST(iscsi_mac));
+			} else
+				bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
+
+			if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						     fcoe_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						    fcoe_mac_addr_lower);
+				bnx2x_set_mac_buf(fip_mac, val, val2);
+				BNX2X_DEV_INFO("Read FCoE L2 MAC: "
+					       BNX2X_MAC_FMT"\n",
+					       BNX2X_MAC_PRN_LIST(fip_mac));
+
+			} else
+				bp->flags |= NO_FCOE_FLAG;
+		}
+#endif
+	} else {
+		/* in SF read MACs from port configuration */
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				    iscsi_mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				   iscsi_mac_lower);
+		bnx2x_set_mac_buf(iscsi_mac, val, val2);
+
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				    fcoe_fip_mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				   fcoe_fip_mac_lower);
+		bnx2x_set_mac_buf(fip_mac, val, val2);
+#endif
+	}
+
+	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
+	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
+
+#ifdef BCM_CNIC
+	/* Set the FCoE MAC in MF_SD mode */
+	if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
+		memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
+
+	/* Disable iSCSI if the MAC configuration is invalid. */
+	if (!is_valid_ether_addr(iscsi_mac)) {
+		bp->flags |= NO_ISCSI_FLAG;
+		memset(iscsi_mac, 0, ETH_ALEN);
+	}
+
+	/* Disable FCoE if the MAC configuration is invalid. */
+	if (!is_valid_ether_addr(fip_mac)) {
+		bp->flags |= NO_FCOE_FLAG;
+		memset(bp->fip_mac, 0, ETH_ALEN);
+	}
+#endif
+
+	if (!is_valid_ether_addr(bp->dev->dev_addr))
+		dev_err(&bp->pdev->dev,
+			"bad Ethernet MAC address configuration: "
+			BNX2X_MAC_FMT", change it manually before bringing up "
+			"the appropriate network interface\n",
+			BNX2X_MAC_PRN_LIST(bp->dev->dev_addr));
+}
+
+static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
+{
+	int func = BP_ABS_FUNC(bp);	/* absolute function number */
+	int vn;
+	u32 val = 0;
+	int rc = 0;
+
+	bnx2x_get_common_hwinfo(bp);
+
+	/*
+	 * initialize IGU parameters
+	 */
+	if (CHIP_IS_E1x(bp)) {
+		bp->common.int_block = INT_BLOCK_HC;
+
+		bp->igu_dsb_id = DEF_SB_IGU_ID;
+		bp->igu_base_sb = 0;
+	} else {
+		bp->common.int_block = INT_BLOCK_IGU;
+		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
+
+		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+			int tout = 5000;
+
+			BNX2X_DEV_INFO("FORCING Normal Mode\n");
+
+			val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
+			REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
+			REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
+
+			while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+				tout--;
+				usleep_range(1000, 1000);
+			}
+
+			if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
+				dev_err(&bp->pdev->dev,
+					"FORCING Normal Mode failed!!!\n");
+				return -EPERM;
+			}
+		}
+
+		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
+			BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
+			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
+		} else
+			BNX2X_DEV_INFO("IGU Normal Mode\n");
+
+		bnx2x_get_igu_cam_info(bp);
+
+	}
+
+	/*
+	 * set base FW non-default (fast path) status block id; this value is
+	 * used to initialize the fw_sb_id saved on the fp/queue structure to
+	 * determine the id used by the FW.
+	 */
+	if (CHIP_IS_E1x(bp))
+		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
+	else /*
+	      * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
+	      * the same queue are indicated on the same IGU SB). So we prefer
+	      * FW and IGU SBs to be the same value.
+	      */
+		bp->base_fw_ndsb = bp->igu_base_sb;
+
+	BNX2X_DEV_INFO("igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n"
+		       "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
+		       bp->igu_sb_cnt, bp->base_fw_ndsb);
+
+	/*
+	 * Initialize MF configuration
+	 */
+
+	bp->mf_ov = 0;
+	bp->mf_mode = 0;
+	vn = BP_E1HVN(bp);
+
+	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+		BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
+			       bp->common.shmem2_base, SHMEM2_RD(bp, size),
+			      (u32)offsetof(struct shmem2_region, mf_cfg_addr));
+
+		if (SHMEM2_HAS(bp, mf_cfg_addr))
+			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
+		else
+			bp->common.mf_cfg_base = bp->common.shmem_base +
+				offsetof(struct shmem_region, func_mb) +
+				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
+		/*
+		 * get mf configuration:
+		 * 1. existence of MF configuration
+		 * 2. MAC address must be legal (check only upper bytes)
+		 *    for  Switch-Independent mode;
+		 *    OVLAN must be legal for Switch-Dependent mode
+		 * 3. SF_MODE configures specific MF mode
+		 */
+		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			/* get mf configuration */
+			val = SHMEM_RD(bp,
+				       dev_info.shared_feature_config.config);
+			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
+
+			switch (val) {
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+				val = MF_CFG_RD(bp, func_mf_config[func].
+						mac_upper);
+				/* check for legal mac (upper bytes)*/
+				if (val != 0xffff) {
+					bp->mf_mode = MULTI_FUNCTION_SI;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						   func_mf_config[func].config);
+				} else
+					BNX2X_DEV_INFO("illegal MAC address "
+						       "for SI\n");
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+				/* get OV configuration */
+				val = MF_CFG_RD(bp,
+					func_mf_config[FUNC_0].e1hov_tag);
+				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+					bp->mf_mode = MULTI_FUNCTION_SD;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else
+					BNX2X_DEV_INFO("illegal OV for SD\n");
+				break;
+			default:
+				/* Unknown configuration: reset mf_config */
+				bp->mf_config[vn] = 0;
+				BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
+			}
+		}
+
+		BNX2X_DEV_INFO("%s function mode\n",
+			       IS_MF(bp) ? "multi" : "single");
+
+		switch (bp->mf_mode) {
+		case MULTI_FUNCTION_SD:
+			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+			      FUNC_MF_CFG_E1HOV_TAG_MASK;
+			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+				bp->mf_ov = val;
+				bp->path_has_ovlan = true;
+
+				BNX2X_DEV_INFO("MF OV for func %d is %d "
+					       "(0x%04x)\n", func, bp->mf_ov,
+					       bp->mf_ov);
+			} else {
+				dev_err(&bp->pdev->dev,
+					"No valid MF OV for func %d, "
+					"aborting\n", func);
+				return -EPERM;
+			}
+			break;
+		case MULTI_FUNCTION_SI:
+			BNX2X_DEV_INFO("func %d is in MF "
+				       "switch-independent mode\n", func);
+			break;
+		default:
+			if (vn) {
+				dev_err(&bp->pdev->dev,
+					"VN %d is in a single function mode, "
+					"aborting\n", vn);
+				return -EPERM;
+			}
+			break;
+		}
+
+		/* Check if the other port on the path needs an ovlan:
+		 * since the MF configuration is shared between the ports,
+		 * the only possible mixed modes are
+		 * {SF, SI}, {SF, SD}, {SD, SF} and {SI, SF}
+		 */
+		if (CHIP_MODE_IS_4_PORT(bp) &&
+		    !bp->path_has_ovlan &&
+		    !IS_MF(bp) &&
+		    bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			u8 other_port = !BP_PORT(bp);
+			u8 other_func = BP_PATH(bp) + 2*other_port;
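+			/* e.g. on path 0, port 0 this reads the e1hov_tag
+			 * of function 2 (path 0, port 1)
+			 */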
+			val = MF_CFG_RD(bp,
+					func_mf_config[other_func].e1hov_tag);
+			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
+				bp->path_has_ovlan = true;
+		}
+	}
+
+	/* adjust igu_sb_cnt to MF for E1x */
+	if (CHIP_IS_E1x(bp) && IS_MF(bp))
+		bp->igu_sb_cnt /= E1HVN_MAX;
+
+	/* port info */
+	bnx2x_get_port_hwinfo(bp);
+
+	if (!BP_NOMCP(bp)) {
+		bp->fw_seq =
+			(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+			 DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+
+	/* Get MAC addresses */
+	bnx2x_get_mac_hwinfo(bp);
+
+#ifdef BCM_CNIC
+	bnx2x_get_cnic_info(bp);
+#endif
+
+	/* Get current FW pulse sequence */
+	if (!BP_NOMCP(bp)) {
+		int mb_idx = BP_FW_MB_IDX(bp);
+
+		bp->fw_drv_pulse_wr_seq =
+				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
+				 DRV_PULSE_SEQ_MASK);
+		BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+	}
+
+	return rc;
+}
+
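+/* In outline: walk the PCI VPD for the read-only data block, match the
+ * manufacturer-ID keyword against Dell's vendor ID (in either hex case)
+ * and, on a match, copy the first vendor-specific field into bp->fw_ver.
+ */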
+static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
+{
+	int cnt, i, block_end, rodi;
+	char vpd_data[BNX2X_VPD_LEN+1];
+	char str_id_reg[VENDOR_ID_LEN+1];
+	char str_id_cap[VENDOR_ID_LEN+1];
+	u8 len;
+
+	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
+
+	if (cnt < BNX2X_VPD_LEN)
+		goto out_not_found;
+
+	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+			     PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto out_not_found;
+
+	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
+		    pci_vpd_lrdt_size(&vpd_data[i]);
+
+	i += PCI_VPD_LRDT_TAG_SIZE;
+
+	if (block_end > BNX2X_VPD_LEN)
+		goto out_not_found;
+
+	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+				   PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (rodi < 0)
+		goto out_not_found;
+
+	len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+	if (len != VENDOR_ID_LEN)
+		goto out_not_found;
+
+	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+	/* vendor specific info */
+	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
+	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
+	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
+	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
+
+		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
+						PCI_VPD_RO_KEYWORD_VENDOR0);
+		if (rodi >= 0) {
+			len = pci_vpd_info_field_size(&vpd_data[rodi]);
+
+			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
+				memcpy(bp->fw_ver, &vpd_data[rodi], len);
+				bp->fw_ver[len] = ' ';
+			}
+		}
+		return;
+	}
+out_not_found:
+	return;
+}
+
+static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
+{
+	u32 flags = 0;
+
+	if (CHIP_REV_IS_FPGA(bp))
+		SET_FLAGS(flags, MODE_FPGA);
+	else if (CHIP_REV_IS_EMUL(bp))
+		SET_FLAGS(flags, MODE_EMUL);
+	else
+		SET_FLAGS(flags, MODE_ASIC);
+
+	if (CHIP_MODE_IS_4_PORT(bp))
+		SET_FLAGS(flags, MODE_PORT4);
+	else
+		SET_FLAGS(flags, MODE_PORT2);
+
+	if (CHIP_IS_E2(bp))
+		SET_FLAGS(flags, MODE_E2);
+	else if (CHIP_IS_E3(bp)) {
+		SET_FLAGS(flags, MODE_E3);
+		if (CHIP_REV(bp) == CHIP_REV_Ax)
+			SET_FLAGS(flags, MODE_E3_A0);
+		else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
+			SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
+	}
+
+	if (IS_MF(bp)) {
+		SET_FLAGS(flags, MODE_MF);
+		switch (bp->mf_mode) {
+		case MULTI_FUNCTION_SD:
+			SET_FLAGS(flags, MODE_MF_SD);
+			break;
+		case MULTI_FUNCTION_SI:
+			SET_FLAGS(flags, MODE_MF_SI);
+			break;
+		}
+	} else
+		SET_FLAGS(flags, MODE_SF);
+
+#if defined(__LITTLE_ENDIAN)
+	SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
+#else /*(__BIG_ENDIAN)*/
+	SET_FLAGS(flags, MODE_BIG_ENDIAN);
+#endif
+	INIT_MODE_FLAGS(bp) = flags;
+}
+
+static int __devinit bnx2x_init_bp(struct bnx2x *bp)
+{
+	int func;
+	int timer_interval;
+	int rc;
+
+	mutex_init(&bp->port.phy_mutex);
+	mutex_init(&bp->fw_mb_mutex);
+	spin_lock_init(&bp->stats_lock);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_mutex);
+#endif
+
+	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
+	INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
+	INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
+	rc = bnx2x_get_hwinfo(bp);
+	if (rc)
+		return rc;
+
+	bnx2x_set_modes_bitmap(bp);
+
+	rc = bnx2x_alloc_mem_bp(bp);
+	if (rc)
+		return rc;
+
+	bnx2x_read_fwinfo(bp);
+
+	func = BP_FUNC(bp);
+
+	/* need to reset chip if undi was active */
+	if (!BP_NOMCP(bp))
+		bnx2x_undi_unload(bp);
+
+	if (CHIP_REV_IS_FPGA(bp))
+		dev_err(&bp->pdev->dev, "FPGA detected\n");
+
+	if (BP_NOMCP(bp) && (func == 0))
+		dev_err(&bp->pdev->dev, "MCP disabled, "
+					"must load devices in order!\n");
+
+	bp->multi_mode = multi_mode;
+
+	/* Set TPA flags */
+	if (disable_tpa) {
+		bp->flags &= ~TPA_ENABLE_FLAG;
+		bp->dev->features &= ~NETIF_F_LRO;
+	} else {
+		bp->flags |= TPA_ENABLE_FLAG;
+		bp->dev->features |= NETIF_F_LRO;
+	}
+	bp->disable_tpa = disable_tpa;
+
+	if (CHIP_IS_E1(bp))
+		bp->dropless_fc = 0;
+	else
+		bp->dropless_fc = dropless_fc;
+
+	bp->mrrs = mrrs;
+
+	bp->tx_ring_size = MAX_TX_AVAIL;
+
+	/* make sure that the numbers are in the right granularity */
+	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
+	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
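+	/* (50 / BNX2X_BTR) * BNX2X_BTR rounds the default down to a whole
+	 * multiple of BNX2X_BTR, the coalescing timer granularity.
+	 */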
+
+	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
+	bp->current_interval = (poll ? poll : timer_interval);
+
+	init_timer(&bp->timer);
+	bp->timer.expires = jiffies + bp->current_interval;
+	bp->timer.data = (unsigned long) bp;
+	bp->timer.function = bnx2x_timer;
+
+	bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
+	bnx2x_dcbx_init_params(bp);
+
+#ifdef BCM_CNIC
+	if (CHIP_IS_E1x(bp))
+		bp->cnic_base_cl_id = FP_SB_MAX_E1x;
+	else
+		bp->cnic_base_cl_id = FP_SB_MAX_E2;
+#endif
+
+	/* multiple tx priority */
+	if (CHIP_IS_E1x(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
+	if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
+	if (CHIP_IS_E3B0(bp))
+		bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+
+	return rc;
+}
+
+
+/****************************************************************************
+* General service functions
+****************************************************************************/
+
+/*
+ * net_device service functions
+ */
+
+/* called with rtnl_lock */
+static int bnx2x_open(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	bool global = false;
+	int other_engine = BP_PATH(bp) ? 0 : 1;
+	u32 other_load_counter, load_counter;
+
+	netif_carrier_off(dev);
+
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
+	load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+
+	/*
+	 * If parity had happened during the unload, then attentions
+	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
+	 * want the first function loaded on the current engine to
+	 * complete the recovery.
+	 */
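+	/* The do { } while (0) below is just a breakable block: a
+	 * successful recovery exits it via break; everything else falls
+	 * through to the failure path and returns -EAGAIN.
+	 */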
+	if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
+	    bnx2x_chk_parity_attn(bp, &global, true))
+		do {
+			/*
+			 * If there are attentions and they are in global
+			 * blocks, set the GLOBAL_RESET bit regardless of
+			 * whether it will be this function that completes
+			 * the recovery or not.
+			 */
+			if (global)
+				bnx2x_set_reset_global(bp);
+
+			/*
+			 * Only the first function on the current engine should
+			 * try to recover in open. In case of attentions in
+			 * global blocks only the first in the chip should try
+			 * to recover.
+			 */
+			if ((!load_counter &&
+			     (!global || !other_load_counter)) &&
+			    bnx2x_trylock_leader_lock(bp) &&
+			    !bnx2x_leader_reset(bp)) {
+				netdev_info(bp->dev, "Recovered in open\n");
+				break;
+			}
+
+			/* recovery has failed... */
+			bnx2x_set_power_state(bp, PCI_D3hot);
+			bp->recovery_state = BNX2X_RECOVERY_FAILED;
+
+			netdev_err(bp->dev, "Recovery flow hasn't been properly"
+			" completed yet. Try again later. If you still see this"
+			" message after a few retries then a power cycle is"
+			" required.\n");
+
+			return -EAGAIN;
+		} while (0);
+
+	bp->recovery_state = BNX2X_RECOVERY_DONE;
+	return bnx2x_nic_load(bp, LOAD_OPEN);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_close(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	/* Unload the driver, release IRQs */
+	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+
+	/* Power off */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	return 0;
+}
+
+static inline int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
+					 struct bnx2x_mcast_ramrod_params *p)
+{
+	int mc_count = netdev_mc_count(bp->dev);
+	struct bnx2x_mcast_list_elem *mc_mac =
+		kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+	struct netdev_hw_addr *ha;
+
+	if (!mc_mac)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&p->mcast_list);
+
+	netdev_for_each_mc_addr(ha, bp->dev) {
+		mc_mac->mac = bnx2x_mc_addr(ha);
+		list_add_tail(&mc_mac->link, &p->mcast_list);
+		mc_mac++;
+	}
+
+	p->mcast_list_len = mc_count;
+
+	return 0;
+}
+
+static inline void bnx2x_free_mcast_macs_list(
+	struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_mcast_list_elem *mc_mac =
+		list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
+				 link);
+
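+	/* All elements were carved from the single array allocated in
+	 * bnx2x_init_mcast_macs_list(), so freeing the first element
+	 * releases the whole list.
+	 */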
+	WARN_ON(!mc_mac);
+	kfree(mc_mac);
+}
+
+/**
+ * bnx2x_set_uc_list - configure a new list of unicast MACs.
+ *
+ * @bp: driver handle
+ *
+ * We will use zero (0) as a MAC type for these MACs.
+ */
+static inline int bnx2x_set_uc_list(struct bnx2x *bp)
+{
+	int rc;
+	struct net_device *dev = bp->dev;
+	struct netdev_hw_addr *ha;
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+	unsigned long ramrod_flags = 0;
+
+	/* First schedule a cleanup of the old configuration */
+	rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
+		return rc;
+	}
+
+	netdev_for_each_uc_addr(ha, dev) {
+		rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
+				       BNX2X_UC_LIST_MAC, &ramrod_flags);
+		if (rc < 0) {
+			BNX2X_ERR("Failed to schedule ADD operations: %d\n",
+				  rc);
+			return rc;
+		}
+	}
+
+	/* Execute the pending commands */
+	__set_bit(RAMROD_CONT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
+				 BNX2X_UC_LIST_MAC, &ramrod_flags);
+}
+
+static inline int bnx2x_set_mc_list(struct bnx2x *bp)
+{
+	struct net_device *dev = bp->dev;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	int rc = 0;
+
+	rparam.mcast_obj = &bp->mcast_obj;
+
+	/* first, clear all configured multicast MACs */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0) {
+		BNX2X_ERR("Failed to clear multicast "
+			  "configuration: %d\n", rc);
+		return rc;
+	}
+
+	/* then, configure a new MACs list */
+	if (netdev_mc_count(dev)) {
+		rc = bnx2x_init_mcast_macs_list(bp, &rparam);
+		if (rc) {
+			BNX2X_ERR("Failed to create multicast MACs "
+				  "list: %d\n", rc);
+			return rc;
+		}
+
+		/* Now add the new MACs */
+		rc = bnx2x_config_mcast(bp, &rparam,
+					BNX2X_MCAST_CMD_ADD);
+		if (rc < 0)
+			BNX2X_ERR("Failed to set a new multicast "
+				  "configuration: %d\n", rc);
+
+		bnx2x_free_mcast_macs_list(&rparam);
+	}
+
+	return rc;
+}
+
+
+/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
+void bnx2x_set_rx_mode(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
+
+	if (bp->state != BNX2X_STATE_OPEN) {
+		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
+		return;
+	}
+
+	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
+
+	if (dev->flags & IFF_PROMISC)
+		rx_mode = BNX2X_RX_MODE_PROMISC;
+	else if ((dev->flags & IFF_ALLMULTI) ||
+		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+		  CHIP_IS_E1(bp)))
+		rx_mode = BNX2X_RX_MODE_ALLMULTI;
+	else {
+		/* some multicasts */
+		if (bnx2x_set_mc_list(bp) < 0)
+			rx_mode = BNX2X_RX_MODE_ALLMULTI;
+
+		if (bnx2x_set_uc_list(bp) < 0)
+			rx_mode = BNX2X_RX_MODE_PROMISC;
+	}
+
+	bp->rx_mode = rx_mode;
+
+	/* Schedule the rx_mode command */
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
+		set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
+		return;
+	}
+
+	bnx2x_set_storm_rx_mode(bp);
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
+			   int devad, u16 addr)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	u16 value;
+	int rc;
+
+	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
+	   prtad, devad, addr);
+
+	/* The HW expects different devad if CL22 is used */
+	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
+	bnx2x_release_phy_lock(bp);
+	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
+
+	if (!rc)
+		rc = value;
+	return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
+			    u16 addr, u16 value)
+{
+	struct bnx2x *bp = netdev_priv(netdev);
+	int rc;
+
+	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
+			   " value 0x%x\n", prtad, devad, addr, value);
+
+	/* The HW expects different devad if CL22 is used */
+	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
+
+	bnx2x_acquire_phy_lock(bp);
+	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
+	bnx2x_release_phy_lock(bp);
+	return rc;
+}
+
+/* called with rtnl_lock */
+static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct mii_ioctl_data *mdio = if_mii(ifr);
+
+	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
+	   mdio->phy_id, mdio->reg_num, mdio->val_in);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void poll_bnx2x(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+
+	disable_irq(bp->pdev->irq);
+	bnx2x_interrupt(bp->pdev->irq, dev);
+	enable_irq(bp->pdev->irq);
+}
+#endif
+
+static const struct net_device_ops bnx2x_netdev_ops = {
+	.ndo_open		= bnx2x_open,
+	.ndo_stop		= bnx2x_close,
+	.ndo_start_xmit		= bnx2x_start_xmit,
+	.ndo_select_queue	= bnx2x_select_queue,
+	.ndo_set_rx_mode	= bnx2x_set_rx_mode,
+	.ndo_set_mac_address	= bnx2x_change_mac_addr,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_do_ioctl		= bnx2x_ioctl,
+	.ndo_change_mtu		= bnx2x_change_mtu,
+	.ndo_fix_features	= bnx2x_fix_features,
+	.ndo_set_features	= bnx2x_set_features,
+	.ndo_tx_timeout		= bnx2x_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= poll_bnx2x,
+#endif
+	.ndo_setup_tc		= bnx2x_setup_tc,
+
+#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
+	.ndo_fcoe_get_wwn	= bnx2x_fcoe_get_wwn,
+#endif
+};
+
+static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
+{
+	struct device *dev = &bp->pdev->dev;
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
+		bp->flags |= USING_DAC_FLAG;
+		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
+			dev_err(dev, "dma_set_coherent_mask failed, "
+				     "aborting\n");
+			return -EIO;
+		}
+	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
+		dev_err(dev, "System does not support DMA, aborting\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
+				    struct net_device *dev,
+				    unsigned long board_type)
+{
+	struct bnx2x *bp;
+	int rc;
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+	bp = netdev_priv(dev);
+
+	bp->dev = dev;
+	bp->pdev = pdev;
+	bp->flags = 0;
+	bp->pf_num = PCI_FUNC(pdev->devfn);
+
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_err(&bp->pdev->dev,
+			"Cannot enable PCI device, aborting\n");
+		goto err_out;
+	}
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		dev_err(&bp->pdev->dev,
+			"Cannot find PCI device base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
+		       " base address, aborting\n");
+		rc = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (atomic_read(&pdev->enable_cnt) == 1) {
+		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
+		if (rc) {
+			dev_err(&bp->pdev->dev,
+				"Cannot obtain PCI resources, aborting\n");
+			goto err_out_disable;
+		}
+
+		pci_set_master(pdev);
+		pci_save_state(pdev);
+	}
+
+	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (bp->pm_cap == 0) {
+		dev_err(&bp->pdev->dev,
+			"Cannot find power management capability, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	if (!pci_is_pcie(pdev)) {
+		dev_err(&bp->pdev->dev,	"Not PCI Express, aborting\n");
+		rc = -EIO;
+		goto err_out_release;
+	}
+
+	rc = bnx2x_set_coherency_mask(bp);
+	if (rc)
+		goto err_out_release;
+
+	dev->mem_start = pci_resource_start(pdev, 0);
+	dev->base_addr = dev->mem_start;
+	dev->mem_end = pci_resource_end(pdev, 0);
+
+	dev->irq = pdev->irq;
+
+	bp->regview = pci_ioremap_bar(pdev, 0);
+	if (!bp->regview) {
+		dev_err(&bp->pdev->dev,
+			"Cannot map register space, aborting\n");
+		rc = -ENOMEM;
+		goto err_out_release;
+	}
+
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	/* clean indirect addresses */
+	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
+			       PCICFG_VENDOR_ID_OFFSET);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
+	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
+
+	/*
+	 * Enable internal target-read (in case we are probed after PF FLR).
+	 * Must be done prior to any BAR read access. Only for 57712 and up
+	 */
+	if (board_type != BCM57710 &&
+	    board_type != BCM57711 &&
+	    board_type != BCM57711E)
+		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
+
+	/* Reset the load counter */
+	bnx2x_clear_load_cnt(bp);
+
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	dev->netdev_ops = &bnx2x_netdev_ops;
+	bnx2x_set_ethtool_ops(dev);
+
+	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+		NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
+
+	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+		NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
+
+	dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
+	if (bp->flags & USING_DAC_FLAG)
+		dev->features |= NETIF_F_HIGHDMA;
+
+	/* Add Loopback capability to the device */
+	dev->hw_features |= NETIF_F_LOOPBACK;
+
+#ifdef BCM_DCBNL
+	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
+#endif
+
+	/* bnx2x_get_port_hwinfo() will set prtad and mmds properly */
+	bp->mdio.prtad = MDIO_PRTAD_NONE;
+	bp->mdio.mmds = 0;
+	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
+	bp->mdio.dev = dev;
+	bp->mdio.mdio_read = bnx2x_mdio_read;
+	bp->mdio.mdio_write = bnx2x_mdio_write;
+
+	return 0;
+
+err_out_release:
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+err_out_disable:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+err_out:
+	return rc;
+}
+
+static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
+						 int *width, int *speed)
+{
+	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
+
+	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
+
+	/* return value of 1=2.5GHz 2=5GHz */
+	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
+}
+
+static int bnx2x_check_firmware(struct bnx2x *bp)
+{
+	const struct firmware *firmware = bp->firmware;
+	struct bnx2x_fw_file_hdr *fw_hdr;
+	struct bnx2x_fw_file_section *sections;
+	u32 offset, len, num_ops;
+	u16 *ops_offsets;
+	int i;
+	const u8 *fw_ver;
+
+	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+		return -EINVAL;
+
+	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
+	sections = (struct bnx2x_fw_file_section *)fw_hdr;
+
+	/* Make sure none of the offsets and sizes make us read beyond
+	 * the end of the firmware data */
+	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
+		offset = be32_to_cpu(sections[i].offset);
+		len = be32_to_cpu(sections[i].len);
+		if (offset + len > firmware->size) {
+			dev_err(&bp->pdev->dev,
+				"Section %d length is out of bounds\n", i);
+			return -EINVAL;
+		}
+	}
+
+	/* Likewise for the init_ops offsets */
+	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
+	ops_offsets = (u16 *)(firmware->data + offset);
+	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
+
+	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
+		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
+			dev_err(&bp->pdev->dev,
+				"Section offset %d is out of bounds\n", i);
+			return -EINVAL;
+		}
+	}
+
+	/* Check FW version */
+	offset = be32_to_cpu(fw_hdr->fw_version.offset);
+	fw_ver = firmware->data + offset;
+	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
+	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
+	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
+	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
+		dev_err(&bp->pdev->dev,
+			"Bad FW version: %d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+		       fw_ver[0], fw_ver[1], fw_ver[2],
+		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+		       BCM_5710_FW_MINOR_VERSION,
+		       BCM_5710_FW_REVISION_VERSION,
+		       BCM_5710_FW_ENGINEERING_VERSION);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	u32 *target = (u32 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/4; i++)
+		target[i] = be32_to_cpu(source[i]);
+}
+
+/*
+   Ops array is stored in the following format:
+   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
+ */
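+/* For example, the raw record bytes 01 00 20 40 00 00 00 05 decode to
+ * op = 0x01, offset = 0x002040, raw_data = 0x00000005.
+ */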
+static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	struct raw_op *target = (struct raw_op *)_target;
+	u32 i, j, tmp;
+
+	for (i = 0, j = 0; i < n/8; i++, j += 2) {
+		tmp = be32_to_cpu(source[j]);
+		target[i].op = (tmp >> 24) & 0xff;
+		target[i].offset = tmp & 0xffffff;
+		target[i].raw_data = be32_to_cpu(source[j + 1]);
+	}
+}
+
+/**
+ * IRO array is stored in the following format:
+ * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
+ */
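+/* Each packed entry occupies three big-endian 32-bit words, which the
+ * loop below unpacks into the host-endian struct iro fields.
+ */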
+static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be32 *source = (const __be32 *)_source;
+	struct iro *target = (struct iro *)_target;
+	u32 i, j, tmp;
+
+	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
+		target[i].base = be32_to_cpu(source[j]);
+		j++;
+		tmp = be32_to_cpu(source[j]);
+		target[i].m1 = (tmp >> 16) & 0xffff;
+		target[i].m2 = tmp & 0xffff;
+		j++;
+		tmp = be32_to_cpu(source[j]);
+		target[i].m3 = (tmp >> 16) & 0xffff;
+		target[i].size = tmp & 0xffff;
+		j++;
+	}
+}
+
+static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
+{
+	const __be16 *source = (const __be16 *)_source;
+	u16 *target = (u16 *)_target;
+	u32 i;
+
+	for (i = 0; i < n/2; i++)
+		target[i] = be16_to_cpu(source[i]);
+}
+
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
+do {									\
+	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
+	bp->arr = kmalloc(len, GFP_KERNEL);				\
+	if (!bp->arr) {							\
+		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+		goto lbl;						\
+	}								\
+	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
+	     (u8 *)bp->arr, len);					\
+} while (0)
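+/* Example use (see bnx2x_init_firmware() below):
+ *   BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n)
+ * allocates bp->init_data, byte-swaps the init_data section of the
+ * firmware blob into it and jumps to request_firmware_exit on failure.
+ */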
+
+int bnx2x_init_firmware(struct bnx2x *bp)
+{
+	const char *fw_file_name;
+	struct bnx2x_fw_file_hdr *fw_hdr;
+	int rc;
+
+	if (CHIP_IS_E1(bp))
+		fw_file_name = FW_FILE_NAME_E1;
+	else if (CHIP_IS_E1H(bp))
+		fw_file_name = FW_FILE_NAME_E1H;
+	else if (!CHIP_IS_E1x(bp))
+		fw_file_name = FW_FILE_NAME_E2;
+	else {
+		BNX2X_ERR("Unsupported chip revision\n");
+		return -EINVAL;
+	}
+
+	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
+
+	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
+	if (rc) {
+		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
+		goto request_firmware_exit;
+	}
+
+	rc = bnx2x_check_firmware(bp);
+	if (rc) {
+		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
+		goto request_firmware_exit;
+	}
+
+	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
+
+	/* Initialize the pointers to the init arrays */
+	/* Blob */
+	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
+
+	/* Opcodes */
+	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
+
+	/* Offsets */
+	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
+			    be16_to_cpu_n);
+
+	/* STORMs firmware */
+	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
+	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
+	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
+	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->usem_pram_data.offset);
+	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
+	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
+	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
+			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
+	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
+			be32_to_cpu(fw_hdr->csem_pram_data.offset);
+	/* IRO */
+	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
+
+	return 0;
+
+iro_alloc_err:
+	kfree(bp->init_ops_offsets);
+init_offsets_alloc_err:
+	kfree(bp->init_ops);
+init_ops_alloc_err:
+	kfree(bp->init_data);
+request_firmware_exit:
+	release_firmware(bp->firmware);
+
+	return rc;
+}
+
+static void bnx2x_release_firmware(struct bnx2x *bp)
+{
+	kfree(bp->init_ops_offsets);
+	kfree(bp->init_ops);
+	kfree(bp->init_data);
+	release_firmware(bp->firmware);
+}
+
+
+static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
+	.init_hw_cmn_chip = bnx2x_init_hw_common_chip,
+	.init_hw_cmn      = bnx2x_init_hw_common,
+	.init_hw_port     = bnx2x_init_hw_port,
+	.init_hw_func     = bnx2x_init_hw_func,
+
+	.reset_hw_cmn     = bnx2x_reset_common,
+	.reset_hw_port    = bnx2x_reset_port,
+	.reset_hw_func    = bnx2x_reset_func,
+
+	.gunzip_init      = bnx2x_gunzip_init,
+	.gunzip_end       = bnx2x_gunzip_end,
+
+	.init_fw          = bnx2x_init_firmware,
+	.release_fw       = bnx2x_release_firmware,
+};
+
+void bnx2x__init_func_obj(struct bnx2x *bp)
+{
+	/* Prepare DMAE related driver resources */
+	bnx2x_setup_dmae(bp);
+
+	bnx2x_init_func_obj(bp, &bp->func_obj,
+			    bnx2x_sp(bp, func_rdata),
+			    bnx2x_sp_mapping(bp, func_rdata),
+			    &bnx2x_func_sp_drv);
+}
+
+/* must be called after sriov-enable */
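+/* Round the L2 (+ CNIC) CID count up to a whole QM_CID_ROUND multiple,
+ * since the QM deals with the CID space in such units.
+ */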
+static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp)
+{
+	int cid_count = BNX2X_L2_CID_COUNT(bp);
+
+#ifdef BCM_CNIC
+	cid_count += CNIC_CID_MAX;
+#endif
+	return roundup(cid_count, QM_CID_ROUND);
+}
+
+/**
+ * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
+ *
+ * @pdev:	pci device
+ *
+ */
+static inline int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev)
+{
+	int pos;
+	u16 control;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+
+	/*
+	 * If MSI-X is not supported - return number of SBs needed to support
+	 * one fast path queue: one FP queue + SB for CNIC
+	 */
+	if (!pos)
+		return 1 + CNIC_PRESENT;
+
+	/*
+	 * The value in the PCI configuration space is the index of the last
+	 * entry, namely one less than the actual size of the table, which is
+	 * exactly what we want to return from this function: number of all SBs
+	 * without the default SB.
+	 */
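+	/* E.g. a 17-entry MSI-X table reports 16 in the QSIZE field:
+	 * 16 non-default SBs, with the default SB taking the extra entry.
+	 */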
+	pci_read_config_word(pdev, pos + PCI_MSIX_FLAGS, &control);
+	return control & PCI_MSIX_FLAGS_QSIZE;
+}
+
+static int __devinit bnx2x_init_one(struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	struct net_device *dev = NULL;
+	struct bnx2x *bp;
+	int pcie_width, pcie_speed;
+	int rc, max_non_def_sbs;
+	int rx_count, tx_count, rss_count;
+	/*
+	 * An estimated maximum supported CoS number according to the chip
+	 * version.
+	 * We will try to roughly estimate the maximum number of CoSes this
+	 * chip may support in order to minimize the memory allocated for Tx
+	 * netdev_queue's. The accurate value is calculated later, when
+	 * bp->max_cos is initialized in bnx2x_init_bp() based on the chip
+	 * version and revision.
+	 */
+	u8 max_cos_est = 0;
+
+	switch (ent->driver_data) {
+	case BCM57710:
+	case BCM57711:
+	case BCM57711E:
+		max_cos_est = BNX2X_MULTI_TX_COS_E1X;
+		break;
+
+	case BCM57712:
+	case BCM57712_MF:
+		max_cos_est = BNX2X_MULTI_TX_COS_E2_E3A0;
+		break;
+
+	case BCM57800:
+	case BCM57800_MF:
+	case BCM57810:
+	case BCM57810_MF:
+	case BCM57840:
+	case BCM57840_MF:
+		max_cos_est = BNX2X_MULTI_TX_COS_E3B0;
+		break;
+
+	default:
+		pr_err("Unknown board_type (%ld), aborting\n",
+			   ent->driver_data);
+		return -ENODEV;
+	}
+
+	max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev);
+
+	/* !!! FIXME !!!
+	 * Do not allow the maximum SB count to grow above 16
+	 * since Special CIDs start from 16*BNX2X_MULTI_TX_COS=48.
+	 * We will use the FP_SB_MAX_E1x macro for this matter.
+	 */
+	max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs);
+
+	WARN_ON(!max_non_def_sbs);
+
+	/* Maximum number of RSS queues: one IGU SB goes to CNIC */
+	rss_count = max_non_def_sbs - CNIC_PRESENT;
+
+	/* Maximum number of netdev Rx queues: RSS + FCoE L2 */
+	rx_count = rss_count + FCOE_PRESENT;
+
+	/*
+	 * Maximum number of netdev Tx queues:
+	 *      Maximum TSS queues * Maximum supported number of CoS  + FCoE L2
+	 */
+	tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT;
+
+	/* dev zeroed in alloc_etherdev_mqs() */
+	dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
+	if (!dev) {
+		dev_err(&pdev->dev, "Cannot allocate net device\n");
+		return -ENOMEM;
+	}
+
+	bp = netdev_priv(dev);
+
+	DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+			  tx_count, rx_count);
+
+	bp->igu_sb_cnt = max_non_def_sbs;
+	bp->msg_enable = debug;
+	pci_set_drvdata(pdev, dev);
+
+	rc = bnx2x_init_dev(pdev, dev, ent->driver_data);
+	if (rc < 0) {
+		free_netdev(dev);
+		return rc;
+	}
+
+	DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
+
+	rc = bnx2x_init_bp(bp);
+	if (rc)
+		goto init_one_exit;
+
+	/*
+	 * Map doorbells here as we need the real value of bp->max_cos, which
+	 * is initialized in bnx2x_init_bp().
+	 */
+	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
+					min_t(u64, BNX2X_DB_SIZE(bp),
+					      pci_resource_len(pdev, 2)));
+	if (!bp->doorbells) {
+		dev_err(&bp->pdev->dev,
+			"Cannot map doorbell space, aborting\n");
+		rc = -ENOMEM;
+		goto init_one_exit;
+	}
+
+	/* calc qm_cid_count */
+	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
+
+#ifdef BCM_CNIC
+	/* disable FCoE L2 queue for E1x and E3 */
+	if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+		bp->flags |= NO_FCOE_FLAG;
+
+#endif
+
+	/* Configure interrupt mode: try to enable MSI-X/MSI if
+	 * needed, set bp->num_queues appropriately.
+	 */
+	bnx2x_set_int_mode(bp);
+
+	/* Add all NAPI objects */
+	bnx2x_add_all_napi(bp);
+
+	rc = register_netdev(dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot register net device\n");
+		goto init_one_exit;
+	}
+
+#ifdef BCM_CNIC
+	if (!NO_FCOE(bp)) {
+		/* Add storage MAC address */
+		rtnl_lock();
+		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+	}
+#endif
+
+	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
+
+	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
+	       " IRQ %d, ", board_info[ent->driver_data].name,
+	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+	       pcie_width,
+	       ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
+		 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
+						"5GHz (Gen2)" : "2.5GHz",
+	       dev->base_addr, bp->pdev->irq);
+	pr_cont("node addr %pM\n", dev->dev_addr);
+
+	return 0;
+
+init_one_exit:
+	if (bp->regview)
+		iounmap(bp->regview);
+
+	if (bp->doorbells)
+		iounmap(bp->doorbells);
+
+	free_netdev(dev);
+
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	return rc;
+}
+
+static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp;
+
+	if (!dev) {
+		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
+		return;
+	}
+	bp = netdev_priv(dev);
+
+#ifdef BCM_CNIC
+	/* Delete storage MAC address */
+	if (!NO_FCOE(bp)) {
+		rtnl_lock();
+		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
+		rtnl_unlock();
+	}
+#endif
+
+#ifdef BCM_DCBNL
+	/* Delete app tlvs from dcbnl */
+	bnx2x_dcbnl_update_applist(bp, true);
+#endif
+
+	unregister_netdev(dev);
+
+	/* Delete all NAPI objects */
+	bnx2x_del_all_napi(bp);
+
+	/* Power on: we can't let PCI layer write to us while we are in D3 */
+	bnx2x_set_power_state(bp, PCI_D0);
+
+	/* Disable MSI/MSI-X */
+	bnx2x_disable_msi(bp);
+
+	/* Power off */
+	bnx2x_set_power_state(bp, PCI_D3hot);
+
+	/* Make sure RESET task is not scheduled before continuing */
+	cancel_delayed_work_sync(&bp->sp_rtnl_task);
+
+	if (bp->regview)
+		iounmap(bp->regview);
+
+	if (bp->doorbells)
+		iounmap(bp->doorbells);
+
+	bnx2x_free_mem_bp(bp);
+
+	free_netdev(dev);
+
+	if (atomic_read(&pdev->enable_cnt) == 1)
+		pci_release_regions(pdev);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
+{
+	int i;
+
+	bp->state = BNX2X_STATE_ERROR;
+
+	bp->rx_mode = BNX2X_RX_MODE_NONE;
+
+#ifdef BCM_CNIC
+	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
+#endif
+	/* Stop Tx */
+	bnx2x_tx_disable(bp);
+
+	bnx2x_netif_stop(bp, 0);
+
+	del_timer_sync(&bp->timer);
+
+	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+
+	/* Release IRQs */
+	bnx2x_free_irq(bp);
+
+	/* Free SKBs, SGEs, TPA pool and driver internals */
+	bnx2x_free_skbs(bp);
+
+	for_each_rx_queue(bp, i)
+		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
+
+	bnx2x_free_mem(bp);
+
+	bp->state = BNX2X_STATE_CLOSED;
+
+	netif_carrier_off(bp->dev);
+
+	return 0;
+}
+
+static void bnx2x_eeh_recover(struct bnx2x *bp)
+{
+	u32 val;
+
+	mutex_init(&bp->port.phy_mutex);
+
+	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
+	bp->link_params.shmem_base = bp->common.shmem_base;
+	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
+
+	if (!bp->common.shmem_base ||
+	    (bp->common.shmem_base < 0xA0000) ||
+	    (bp->common.shmem_base >= 0xC0000)) {
+		BNX2X_DEV_INFO("MCP not active\n");
+		bp->flags |= NO_MCP_FLAG;
+		return;
+	}
+
+	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
+	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
+		BNX2X_ERR("BAD MCP validity signature\n");
+
+	if (!BP_NOMCP(bp)) {
+		bp->fw_seq =
+		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+		    DRV_MSG_SEQ_NUMBER_MASK);
+		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+	}
+}
+
+/**
+ * bnx2x_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	netif_device_detach(dev);
+
+	if (state == pci_channel_io_perm_failure) {
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	if (netif_running(dev))
+		bnx2x_eeh_nic_unload(bp);
+
+	pci_disable_device(pdev);
+
+	rtnl_unlock();
+
+	/* Request a slot reset */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * bnx2x_io_slot_reset - called after the PCI bus has been reset
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ */
+static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	rtnl_lock();
+
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset\n");
+		rtnl_unlock();
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
+
+	if (netif_running(dev))
+		bnx2x_set_power_state(bp, PCI_D0);
+
+	rtnl_unlock();
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * bnx2x_io_resume - called when traffic can start flowing again
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation.
+ */
+static void bnx2x_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct bnx2x *bp = netdev_priv(dev);
+
+	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
+		netdev_err(bp->dev, "Handling parity error recovery. "
+				    "Try again later\n");
+		return;
+	}
+
+	rtnl_lock();
+
+	bnx2x_eeh_recover(bp);
+
+	if (netif_running(dev))
+		bnx2x_nic_load(bp, LOAD_NORMAL);
+
+	netif_device_attach(dev);
+
+	rtnl_unlock();
+}
+
+static struct pci_error_handlers bnx2x_err_handler = {
+	.error_detected = bnx2x_io_error_detected,
+	.slot_reset     = bnx2x_io_slot_reset,
+	.resume         = bnx2x_io_resume,
+};
+
+static struct pci_driver bnx2x_pci_driver = {
+	.name        = DRV_MODULE_NAME,
+	.id_table    = bnx2x_pci_tbl,
+	.probe       = bnx2x_init_one,
+	.remove      = __devexit_p(bnx2x_remove_one),
+	.suspend     = bnx2x_suspend,
+	.resume      = bnx2x_resume,
+	.err_handler = &bnx2x_err_handler,
+};
+
+static int __init bnx2x_init(void)
+{
+	int ret;
+
+	pr_info("%s", version);
+
+	bnx2x_wq = create_singlethread_workqueue("bnx2x");
+	if (bnx2x_wq == NULL) {
+		pr_err("Cannot create workqueue\n");
+		return -ENOMEM;
+	}
+
+	ret = pci_register_driver(&bnx2x_pci_driver);
+	if (ret) {
+		pr_err("Cannot register driver\n");
+		destroy_workqueue(bnx2x_wq);
+	}
+	return ret;
+}
+
+static void __exit bnx2x_cleanup(void)
+{
+	pci_unregister_driver(&bnx2x_pci_driver);
+
+	destroy_workqueue(bnx2x_wq);
+}
+
+void bnx2x_notify_link_changed(struct bnx2x *bp)
+{
+	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
+}
+
+module_init(bnx2x_init);
+module_exit(bnx2x_cleanup);
+
+#ifdef BCM_CNIC
+/**
+ * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
+ *
+ * @bp:		driver handle
+ *
+ * This function will wait until the ramrod completion returns.
+ * Return 0 if success, -ENODEV if the ramrod doesn't complete.
+ */
+static inline int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
+{
+	unsigned long ramrod_flags = 0;
+
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
+				 &bp->iscsi_l2_mac_obj, true,
+				 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
+}
+
+/* count denotes the number of new completions we have seen */
+static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
+{
+	struct eth_spe *spe;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+	BUG_ON(bp->cnic_spq_pending < count);
+	bp->cnic_spq_pending -= count;
+
+	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
+		u16 type =  (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
+				& SPE_HDR_CONN_TYPE) >>
+				SPE_HDR_CONN_TYPE_SHIFT;
+		u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
+				>> SPE_HDR_CMD_ID_SHIFT) & 0xff;
+
+		/* Set validation for iSCSI L2 client before sending SETUP
+		 *  ramrod
+		 */
+		if (type == ETH_CONNECTION_TYPE) {
+			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
+				bnx2x_set_ctx_validation(bp, &bp->context.
+					vcxt[BNX2X_ISCSI_ETH_CID].eth,
+					BNX2X_ISCSI_ETH_CID);
+		}
+
+		/*
+		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
+		 * in the air at any time. We also check that the number of
+		 * outstanding COMMON ramrods is not more than the EQ and SPQ
+		 * can accommodate.
+		 */
+		if (type == ETH_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->cq_spq_left))
+				break;
+			else
+				atomic_dec(&bp->cq_spq_left);
+		} else if (type == NONE_CONNECTION_TYPE) {
+			if (!atomic_read(&bp->eq_spq_left))
+				break;
+			else
+				atomic_dec(&bp->eq_spq_left);
+		} else if ((type == ISCSI_CONNECTION_TYPE) ||
+			   (type == FCOE_CONNECTION_TYPE)) {
+			if (bp->cnic_spq_pending >=
+			    bp->cnic_eth_dev.max_kwqe_pending)
+				break;
+			else
+				bp->cnic_spq_pending++;
+		} else {
+			BNX2X_ERR("Unknown SPE type: %d\n", type);
+			bnx2x_panic();
+			break;
+		}
+
+		spe = bnx2x_sp_get_next(bp);
+		*spe = *bp->cnic_kwq_cons;
+
+		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
+
+		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
+			bp->cnic_kwq_cons = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_cons++;
+	}
+	bnx2x_sp_prod_update(bp);
+	spin_unlock_bh(&bp->spq_lock);
+}
+
+static int bnx2x_cnic_sp_queue(struct net_device *dev,
+			       struct kwqe_16 *kwqes[], u32 count)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int i;
+
+#ifdef BNX2X_STOP_ON_ERROR
+	if (unlikely(bp->panic))
+		return -EIO;
+#endif
+
+	spin_lock_bh(&bp->spq_lock);
+
+	for (i = 0; i < count; i++) {
+		struct eth_spe *spe = (struct eth_spe *)kwqes[i];
+
+		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
+			break;
+
+		*bp->cnic_kwq_prod = *spe;
+
+		bp->cnic_kwq_pending++;
+
+		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
+		   spe->data.update_data_addr.hi,
+		   spe->data.update_data_addr.lo,
+		   bp->cnic_kwq_pending);
+
+		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
+			bp->cnic_kwq_prod = bp->cnic_kwq;
+		else
+			bp->cnic_kwq_prod++;
+	}
+
+	spin_unlock_bh(&bp->spq_lock);
+
+	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
+		bnx2x_cnic_sp_post(bp, 0);
+
+	return i;
+}
+
+static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	mutex_lock(&bp->cnic_mutex);
+	c_ops = rcu_dereference_protected(bp->cnic_ops,
+					  lockdep_is_held(&bp->cnic_mutex));
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	mutex_unlock(&bp->cnic_mutex);
+
+	return rc;
+}
+
+static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
+{
+	struct cnic_ops *c_ops;
+	int rc = 0;
+
+	rcu_read_lock();
+	c_ops = rcu_dereference(bp->cnic_ops);
+	if (c_ops)
+		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
+	rcu_read_unlock();
+
+	return rc;
+}
+
+/*
+ * for commands that have no data
+ */
+int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
+{
+	struct cnic_ctl_info ctl = {0};
+
+	ctl.cmd = cmd;
+
+	return bnx2x_cnic_ctl_send(bp, &ctl);
+}
+
+static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
+{
+	struct cnic_ctl_info ctl = {0};
+
+	/* first we tell CNIC and only then we count this as a completion */
+	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
+	ctl.data.comp.cid = cid;
+	ctl.data.comp.error = err;
+
+	bnx2x_cnic_ctl_send_bh(bp, &ctl);
+	bnx2x_cnic_sp_post(bp, 0);
+}
+
+
+/* Called with netif_addr_lock_bh() taken.
+ * Sets an rx_mode config for an iSCSI ETH client.
+ * Doesn't block.
+ * Completion should be checked outside.
+ */
+static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
+{
+	unsigned long accept_flags = 0, ramrod_flags = 0;
+	u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+	int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
+
+	if (start) {
+		/* Start accepting on the iSCSI L2 ring. Accept all multicasts
+		 * because it's the only way for the UIO Queue to accept
+		 * them (in non-promiscuous mode only one Queue per function
+		 * will receive multicast packets, the leading one in our
+		 * case).
+		 */
+		__set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
+		__set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+		/* Clear STOP_PENDING bit if START is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
+
+		sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
+	} else
+		/* Clear START_PENDING bit if STOP is requested */
+		clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
+
+	if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
+		set_bit(sched_state, &bp->sp_state);
+	else {
+		__set_bit(RAMROD_RX, &ramrod_flags);
+		bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
+				    ramrod_flags);
+	}
+}
+
+
+static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
+
+	switch (ctl->cmd) {
+	case DRV_CTL_CTXTBL_WR_CMD: {
+		u32 index = ctl->data.io.offset;
+		dma_addr_t addr = ctl->data.io.dma_addr;
+
+		bnx2x_ilt_wr(bp, index, addr);
+		break;
+	}
+
+	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
+		int count = ctl->data.credit.credit_count;
+
+		bnx2x_cnic_sp_post(bp, count);
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_START_L2_CMD: {
+		struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+		unsigned long sp_bits = 0;
+
+		/* Configure the iSCSI classification object */
+		bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
+				   cp->iscsi_l2_client_id,
+				   cp->iscsi_l2_cid, BP_FUNC(bp),
+				   bnx2x_sp(bp, mac_rdata),
+				   bnx2x_sp_mapping(bp, mac_rdata),
+				   BNX2X_FILTER_MAC_PENDING,
+				   &bp->sp_state, BNX2X_OBJ_TYPE_RX,
+				   &bp->macs_pool);
+
+		/* Set iSCSI MAC address */
+		rc = bnx2x_set_iscsi_eth_mac_addr(bp);
+		if (rc)
+			break;
+
+		mmiowb();
+		barrier();
+
+		/* Start accepting on iSCSI L2 ring */
+
+		netif_addr_lock_bh(dev);
+		bnx2x_set_iscsi_eth_rx_mode(bp, true);
+		netif_addr_unlock_bh(dev);
+
+		/* bits to wait on */
+		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+		__set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
+
+		if (!bnx2x_wait_sp_comp(bp, sp_bits))
+			BNX2X_ERR("rx_mode completion timed out!\n");
+
+		break;
+	}
+
+	/* rtnl_lock is held.  */
+	case DRV_CTL_STOP_L2_CMD: {
+		unsigned long sp_bits = 0;
+
+		/* Stop accepting on iSCSI L2 ring */
+		netif_addr_lock_bh(dev);
+		bnx2x_set_iscsi_eth_rx_mode(bp, false);
+		netif_addr_unlock_bh(dev);
+
+		/* bits to wait on */
+		__set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
+		__set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
+
+		if (!bnx2x_wait_sp_comp(bp, sp_bits))
+			BNX2X_ERR("rx_mode completion timed out!\n");
+
+		mmiowb();
+		barrier();
+
+		/* Unset iSCSI L2 MAC */
+		rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
+					BNX2X_ISCSI_ETH_MAC, true);
+		break;
+	}
+	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
+		int count = ctl->data.credit.credit_count;
+
+		smp_mb__before_atomic_inc();
+		atomic_add(count, &bp->cq_spq_left);
+		smp_mb__after_atomic_inc();
+		break;
+	}
+
+	default:
+		BNX2X_ERR("unknown command %x\n", ctl->cmd);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
+{
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (bp->flags & USING_MSIX_FLAG) {
+		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+		cp->irq_arr[0].vector = bp->msix_table[1].vector;
+	} else {
+		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+	}
+	if (!CHIP_IS_E1x(bp))
+		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
+	else
+		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
+
+	cp->irq_arr[0].status_blk_num =  bnx2x_cnic_fw_sb_id(bp);
+	cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
+	cp->irq_arr[1].status_blk = bp->def_status_blk;
+	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
+	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
+
+	cp->num_irq = 2;
+}
+
+static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+			       void *data)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	if (ops == NULL)
+		return -EINVAL;
+
+	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!bp->cnic_kwq)
+		return -ENOMEM;
+
+	bp->cnic_kwq_cons = bp->cnic_kwq;
+	bp->cnic_kwq_prod = bp->cnic_kwq;
+	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
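+	/* The page is used as a ring of SPEs: when a producer/consumer
+	 * pointer reaches cnic_kwq_last it wraps back to cnic_kwq.
+	 */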
+
+	bp->cnic_spq_pending = 0;
+	bp->cnic_kwq_pending = 0;
+
+	bp->cnic_data = data;
+
+	cp->num_irq = 0;
+	cp->drv_state |= CNIC_DRV_STATE_REGD;
+	cp->iro_arr = bp->iro_arr;
+
+	bnx2x_setup_cnic_irq_info(bp);
+
+	rcu_assign_pointer(bp->cnic_ops, ops);
+
+	return 0;
+}
+
+static int bnx2x_unregister_cnic(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	mutex_lock(&bp->cnic_mutex);
+	cp->drv_state = 0;
+	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_mutex);
+	synchronize_rcu();
+	kfree(bp->cnic_kwq);
+	bp->cnic_kwq = NULL;
+
+	return 0;
+}
+
+struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
+{
+	struct bnx2x *bp = netdev_priv(dev);
+	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+	/* If both iSCSI and FCoE are disabled - return NULL in
+	 * order to indicate CNIC that it should not try to work
+	 * with this device.
+	 */
+	if (NO_ISCSI(bp) && NO_FCOE(bp))
+		return NULL;
+
+	cp->drv_owner = THIS_MODULE;
+	cp->chip_id = CHIP_ID(bp);
+	cp->pdev = bp->pdev;
+	cp->io_base = bp->regview;
+	cp->io_base2 = bp->doorbells;
+	cp->max_kwqe_pending = 8;
+	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
+	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
+			     bnx2x_cid_ilt_lines(bp);
+	cp->ctx_tbl_len = CNIC_ILT_LINES;
+	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
+	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
+	cp->drv_ctl = bnx2x_drv_ctl;
+	cp->drv_register_cnic = bnx2x_register_cnic;
+	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
+	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
+	cp->iscsi_l2_client_id =
+		bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
+	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
+
+	if (NO_ISCSI_OOO(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
+
+	if (NO_ISCSI(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
+
+	if (NO_FCOE(bp))
+		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
+
+	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
+			 "starting cid %d\n",
+	   cp->ctx_blk_size,
+	   cp->ctx_tbl_offset,
+	   cp->ctx_tbl_len,
+	   cp->starting_cid);
+	return cp;
+}
+EXPORT_SYMBOL(bnx2x_cnic_probe);
+
+#endif /* BCM_CNIC */
+
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
new file mode 100644
index 0000000..27b5ecb
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -0,0 +1,7146 @@
+/* bnx2x_reg.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * The registers description starts with the register Access type followed
+ * by size in bits. For example [RW 32]. The access types are:
+ * R  - Read only
+ * RC - Clear on read
+ * RW - Read/Write
+ * ST - Statistics register (clear on read)
+ * W  - Write only
+ * WB - Wide bus register - the size is over 32 bits and it should be
+ *      read/write in consecutive 32 bits accesses
+ * WR - Write Clear (write 1 to clear the bit)
+ *
+ */
+#ifndef BNX2X_REG_H
+#define BNX2X_REG_H
+
+#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR			 (0x1<<0)
+#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS		 (0x1<<2)
+#define ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU		 (0x1<<5)
+#define ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT		 (0x1<<3)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR			 (0x1<<4)
+#define ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND		 (0x1<<1)
+/* [RW 1] Initiate the ATC array - reset all the valid bits */
+#define ATC_REG_ATC_INIT_ARRAY					 0x1100b8
+/* [R 1] ATC initialization done */
+#define ATC_REG_ATC_INIT_DONE					 0x1100bc
+/* [RC 6] Interrupt register #0 read clear */
+#define ATC_REG_ATC_INT_STS_CLR					 0x1101c0
+/* [RW 5] Parity mask register #0 read/write */
+#define ATC_REG_ATC_PRTY_MASK					 0x1101d8
+/* [RC 5] Parity register #0 read clear */
+#define ATC_REG_ATC_PRTY_STS_CLR				 0x1101d0
+/* [RW 19] Interrupt mask register #0 read/write */
+#define BRB1_REG_BRB1_INT_MASK					 0x60128
+/* [R 19] Interrupt register #0 read */
+#define BRB1_REG_BRB1_INT_STS					 0x6011c
+/* [RW 4] Parity mask register #0 read/write */
+#define BRB1_REG_BRB1_PRTY_MASK 				 0x60138
+/* [R 4] Parity register #0 read */
+#define BRB1_REG_BRB1_PRTY_STS					 0x6012c
+/* [RC 4] Parity register #0 read clear */
+#define BRB1_REG_BRB1_PRTY_STS_CLR				 0x60130
+/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
+ * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
+ * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
+ * following reset, the first RBC access to this register must be a write;
+ * there can be no more RBC writes after the first one; there can be any
+ * number of RBC reads following the first write; RBC accesses not following
+ * these rules will result in a hang condition. */
+#define BRB1_REG_FREE_LIST_PRS_CRDT				 0x60200
+/* [RW 10] The number of free blocks below which the full signal to class 0
+ * is asserted */
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_0			 0x601d0
+#define BRB1_REG_FULL_0_XOFF_THRESHOLD_1			 0x60230
+/* [RW 11] The number of free blocks above which the full signal to class 0
+ * is de-asserted */
+#define BRB1_REG_FULL_0_XON_THRESHOLD_0				 0x601d4
+#define BRB1_REG_FULL_0_XON_THRESHOLD_1				 0x60234
+/* [RW 11] The number of free blocks below which the full signal to class 1
+ * is asserted */
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_0			 0x601d8
+#define BRB1_REG_FULL_1_XOFF_THRESHOLD_1			 0x60238
+/* [RW 11] The number of free blocks above which the full signal to class 1
+ * is de-asserted */
+#define BRB1_REG_FULL_1_XON_THRESHOLD_0				 0x601dc
+#define BRB1_REG_FULL_1_XON_THRESHOLD_1				 0x6023c
+/* [RW 11] The number of free blocks below which the full signal to the LB
+ * port is asserted */
+#define BRB1_REG_FULL_LB_XOFF_THRESHOLD				 0x601e0
+/* [RW 10] The number of free blocks above which the full signal to the LB
+ * port is de-asserted */
+#define BRB1_REG_FULL_LB_XON_THRESHOLD				 0x601e4
+/* [RW 10] The number of free blocks above which the High_llfc signal to
+   interface #n is de-asserted. */
+#define BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD_0			 0x6014c
+/* [RW 10] The number of free blocks below which the High_llfc signal to
+   interface #n is asserted. */
+#define BRB1_REG_HIGH_LLFC_LOW_THRESHOLD_0			 0x6013c
+/* [RW 11] The number of blocks guaranteed for the LB port */
+#define BRB1_REG_LB_GUARANTIED					 0x601ec
+/* [RW 11] The hysteresis on the guaranteed buffer space for the LB port
+ * before signaling XON. */
+#define BRB1_REG_LB_GUARANTIED_HYST				 0x60264
+/* [RW 24] LL RAM data. */
+#define BRB1_REG_LL_RAM						 0x61000
+/* [RW 10] The number of free blocks above which the Low_llfc signal to
+   interface #n is de-asserted. */
+#define BRB1_REG_LOW_LLFC_HIGH_THRESHOLD_0			 0x6016c
+/* [RW 10] The number of free blocks below which the Low_llfc signal to
+   interface #n is asserted. */
+#define BRB1_REG_LOW_LLFC_LOW_THRESHOLD_0			 0x6015c
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED			 0x60244
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST			 0x60254
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 0. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED			 0x60248
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
+ * 0 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST			 0x60258
+/* [RW 11] The number of blocks guaranteed for class 0 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED			 0x6024c
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 0 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST			 0x6025c
+/* [RW 11] The number of blocks guaranteed for class 1 in MAC 1. The
+ * register is applicable only when per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED			 0x60250
+/* [RW 11] The hysteresis on the guaranteed buffer space for class 1 in MAC
+ * 1 before signaling XON. The register is applicable only when
+ * per_class_guaranty_mode is set. */
+#define BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST			 0x60260
+/* [RW 11] The number of blocks guaranteed for the MAC port. The register is
+ * applicable only when per_class_guaranty_mode is reset. */
+#define BRB1_REG_MAC_GUARANTIED_0				 0x601e8
+#define BRB1_REG_MAC_GUARANTIED_1				 0x60240
+/* [R 24] The number of full blocks. */
+#define BRB1_REG_NUM_OF_FULL_BLOCKS				 0x60090
+/* [ST 32] The number of cycles that the write_full signal towards MAC #0
+   was asserted. */
+#define BRB1_REG_NUM_OF_FULL_CYCLES_0				 0x600c8
+#define BRB1_REG_NUM_OF_FULL_CYCLES_1				 0x600cc
+#define BRB1_REG_NUM_OF_FULL_CYCLES_4				 0x600d8
+/* [ST 32] The number of cycles that the pause signal towards MAC #0 was
+   asserted. */
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_0				 0x600b8
+#define BRB1_REG_NUM_OF_PAUSE_CYCLES_1				 0x600bc
+/* [RW 10] The number of free blocks below which the pause signal to class 0
+ * is asserted */
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0			 0x601c0
+#define BRB1_REG_PAUSE_0_XOFF_THRESHOLD_1			 0x60220
+/* [RW 11] The number of free blocks above which the pause signal to class 0
+ * is de-asserted */
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_0			 0x601c4
+#define BRB1_REG_PAUSE_0_XON_THRESHOLD_1			 0x60224
+/* [RW 11] The number of free blocks below which the pause signal to class 1
+ * is asserted */
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0			 0x601c8
+#define BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1			 0x60228
+/* [RW 11] The number of free blocks above which the pause signal to class 1
+ * is de-asserted */
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_0			 0x601cc
+#define BRB1_REG_PAUSE_1_XON_THRESHOLD_1			 0x6022c
+/* [RW 10] Write client 0: De-assert pause threshold. Not Functional */
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_0 			 0x60078
+#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 			 0x6007c
+/* [RW 10] Write client 0: Assert pause threshold. */
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_0				 0x60068
+#define BRB1_REG_PAUSE_LOW_THRESHOLD_1				 0x6006c
+/* [R 24] The number of full blocks occupied by the port. */
+#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0				 0x60094
+/* [RW 1] Reset the design by software. */
+#define BRB1_REG_SOFT_RESET					 0x600dc
+/* [R 5] Used to read the value of the XX protection CAM occupancy counter. */
+#define CCM_REG_CAM_OCCUP					 0xd0188
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_CFC_IFEN					 0xd003c
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_CQM_IFEN					 0xd000c
+/* [RW 1] If set the Q index; received from the QM is inserted to event ID.
+   Otherwise 0 is inserted. */
+#define CCM_REG_CCM_CQM_USE_Q					 0xd00c0
+/* [RW 11] Interrupt mask register #0 read/write */
+#define CCM_REG_CCM_INT_MASK					 0xd01e4
+/* [R 11] Interrupt register #0 read */
+#define CCM_REG_CCM_INT_STS					 0xd01d8
+/* [RW 27] Parity mask register #0 read/write */
+#define CCM_REG_CCM_PRTY_MASK					 0xd01f4
+/* [R 27] Parity register #0 read */
+#define CCM_REG_CCM_PRTY_STS					 0xd01e8
+/* [RC 27] Parity register #0 read clear */
+#define CCM_REG_CCM_PRTY_STS_CLR				 0xd01ec
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   Is used to determine the number of the AG context REG-pairs written back;
+   when the input message Reg1WbFlg isn't set. */
+#define CCM_REG_CCM_REG0_SZ					 0xd00c4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_STORM0_IFEN 				 0xd0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CCM_STORM1_IFEN 				 0xd0008
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define CCM_REG_CDU_AG_RD_IFEN					 0xd0030
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define CCM_REG_CDU_AG_WR_IFEN					 0xd002c
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define CCM_REG_CDU_SM_RD_IFEN					 0xd0038
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define CCM_REG_CDU_SM_WR_IFEN					 0xd0034
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 1 at start-up. */
+#define CCM_REG_CFC_INIT_CRD					 0xd0204
+/* [RW 2] Auxiliary counter flag Q number 1. */
+#define CCM_REG_CNT_AUX1_Q					 0xd00c8
+/* [RW 2] Auxiliary counter flag Q number 2. */
+#define CCM_REG_CNT_AUX2_Q					 0xd00cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define CCM_REG_CQM_CCM_HDR_P					 0xd008c
+/* [RW 28] The CM header value for QM request (secondary). */
+#define CCM_REG_CQM_CCM_HDR_S					 0xd0090
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CQM_CCM_IFEN					 0xd0014
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define CCM_REG_CQM_INIT_CRD					 0xd020c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_P_WEIGHT					 0xd00b8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CQM_S_WEIGHT					 0xd00bc
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_CSDM_IFEN					 0xd0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the SDM interface is detected. */
+#define CCM_REG_CSDM_LENGTH_MIS 				 0xd0170
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_CSDM_WEIGHT					 0xd00b4
+/* [RW 28] The CM header for QM formatting in case of an error in the QM
+   inputs. */
+#define CCM_REG_ERR_CCM_HDR					 0xd0094
+/* [RW 8] The Event ID in case the input message ErrorFlg is set. */
+#define CCM_REG_ERR_EVNT_ID					 0xd0098
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC0_INIT_CRD					 0xd0210
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define CCM_REG_FIC1_INIT_CRD					 0xd0214
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~ccm_registers_gr_ag_pr.gr_ag_pr;
+   ~ccm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~ccm_registers_gr_ld1_pr.gr_ld1_pr. Groups are according to channels and
+   outputs to STORM: aggregation; load FIC0; load FIC1 and store. */
+#define CCM_REG_GR_ARB_TYPE					 0xd015c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed; that the Store channel priority is
+   the complement to 4 of the rest priorities - Aggregation channel; Load
+   (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD0_PR					 0xd0164
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed; that the Store channel priority is
+   the complement to 4 of the rest priorities - Aggregation channel; Load
+   (FIC0) channel and Load (FIC1). */
+#define CCM_REG_GR_LD1_PR					 0xd0168
+/* [RW 2] General flags index. */
+#define CCM_REG_INV_DONE_Q					 0xd0108
+/* [RW 4] The number of double REG-pairs (128 bits); loaded from the STORM
+   context and sent to STORM; for a specific connection type. The double
+   REG-pairs are used in order to align to STORM context row size of 128
+   bits. The offset of these data in the STORM context is always 0. Index
+   _(0..15) stands for the connection type (one of 16). */
+#define CCM_REG_N_SM_CTX_LD_0					 0xd004c
+#define CCM_REG_N_SM_CTX_LD_1					 0xd0050
+#define CCM_REG_N_SM_CTX_LD_2					 0xd0054
+#define CCM_REG_N_SM_CTX_LD_3					 0xd0058
+#define CCM_REG_N_SM_CTX_LD_4					 0xd005c
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define CCM_REG_PBF_IFEN					 0xd0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the pbf interface is detected. */
+#define CCM_REG_PBF_LENGTH_MIS					 0xd0180
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_PBF_WEIGHT					 0xd00ac
+#define CCM_REG_PHYS_QNUM1_0					 0xd0134
+#define CCM_REG_PHYS_QNUM1_1					 0xd0138
+#define CCM_REG_PHYS_QNUM2_0					 0xd013c
+#define CCM_REG_PHYS_QNUM2_1					 0xd0140
+#define CCM_REG_PHYS_QNUM3_0					 0xd0144
+#define CCM_REG_PHYS_QNUM3_1					 0xd0148
+#define CCM_REG_QOS_PHYS_QNUM0_0				 0xd0114
+#define CCM_REG_QOS_PHYS_QNUM0_1				 0xd0118
+#define CCM_REG_QOS_PHYS_QNUM1_0				 0xd011c
+#define CCM_REG_QOS_PHYS_QNUM1_1				 0xd0120
+#define CCM_REG_QOS_PHYS_QNUM2_0				 0xd0124
+#define CCM_REG_QOS_PHYS_QNUM2_1				 0xd0128
+#define CCM_REG_QOS_PHYS_QNUM3_0				 0xd012c
+#define CCM_REG_QOS_PHYS_QNUM3_1				 0xd0130
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_STORM_CCM_IFEN					 0xd0010
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the STORM interface is detected. */
+#define CCM_REG_STORM_LENGTH_MIS				 0xd016c
+/* [RW 3] The weight of the STORM input in the WRR (Weighted Round robin)
+   mechanism. 0 stands for weight 8 (the most prioritised); 1 stands for
+   weight 1 (least prioritised); 2 stands for weight 2 (more prioritised);
+   etc. */
+#define CCM_REG_STORM_WEIGHT					 0xd009c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_TSEM_IFEN					 0xd001c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the tsem interface is detected. */
+#define CCM_REG_TSEM_LENGTH_MIS 				 0xd0174
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_TSEM_WEIGHT					 0xd00a0
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_USEM_IFEN					 0xd0024
+/* [RC 1] Set when message length mismatch (relative to last indication) at
+   the usem interface is detected. */
+#define CCM_REG_USEM_LENGTH_MIS 				 0xd017c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_USEM_WEIGHT					 0xd00a8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define CCM_REG_XSEM_IFEN					 0xd0020
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the xsem interface is detected. */
+#define CCM_REG_XSEM_LENGTH_MIS 				 0xd0178
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define CCM_REG_XSEM_WEIGHT					 0xd00a4
+/* [RW 19] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [12:6] - message
+   pointer; [18:13] - next pointer. */
+#define CCM_REG_XX_DESCR_TABLE					 0xd0300
+#define CCM_REG_XX_DESCR_TABLE_SIZE				 24
+/* [R 7] Used to read the value of XX protection Free counter. */
+#define CCM_REG_XX_FREE 					 0xd0184
+/* [RW 6] Initial value for the credit counter; responsible for fulfilling
+   of the Input Stage XX protection buffer by the XX protection pending
+   messages. Max credit available - 127. Write writes the initial credit
+   value; read returns the current value of the credit counter. Must be
+   initialized to maximum XX protected message size - 2 at start-up. */
+#define CCM_REG_XX_INIT_CRD					 0xd0220
+/* [RW 7] The maximum number of pending messages; which may be stored in XX
+   protection. At read the ~ccm_registers_xx_free.xx_free counter is read.
+   At write comprises the start value of the ~ccm_registers_xx_free.xx_free
+   counter. */
+#define CCM_REG_XX_MSG_NUM					 0xd0224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define CCM_REG_XX_OVFL_EVNT_ID 				 0xd0044
+/* [RW 18] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [5:0] - tail pointer; [11:6] - Link List size; [17:12] -
+   header pointer. */
+#define CCM_REG_XX_TABLE					 0xd0280
+#define CDU_REG_CDU_CHK_MASK0					 0x101000
+#define CDU_REG_CDU_CHK_MASK1					 0x101004
+#define CDU_REG_CDU_CONTROL0					 0x101008
+#define CDU_REG_CDU_DEBUG					 0x101010
+#define CDU_REG_CDU_GLOBAL_PARAMS				 0x101020
+/* [RW 7] Interrupt mask register #0 read/write */
+#define CDU_REG_CDU_INT_MASK					 0x10103c
+/* [R 7] Interrupt register #0 read */
+#define CDU_REG_CDU_INT_STS					 0x101030
+/* [RW 5] Parity mask register #0 read/write */
+#define CDU_REG_CDU_PRTY_MASK					 0x10104c
+/* [R 5] Parity register #0 read */
+#define CDU_REG_CDU_PRTY_STS					 0x101040
+/* [RC 5] Parity register #0 read clear */
+#define CDU_REG_CDU_PRTY_STS_CLR				 0x101044
+/* [RC 32] logging of error data in case of a CDU load error:
+   {expected_cid[15:0]; expected_type[2:0]; expected_region[2:0];
+   active_error; type_error; actual_active; actual_compressed_context}; */
+#define CDU_REG_ERROR_DATA					 0x101014
+/* [WB 216] L1TT ram access. Each entry has the following format:
+   {merge_regions[7:0]; offset12[5:0]...offset0[5:0];
+   length12[5:0]...length0[5:0]; id12[3:0]...id0[3:0]} */
+#define CDU_REG_L1TT						 0x101800
+/* [WB 24] MATT ram access. Each entry has the following
+   format: {RegionLength[11:0]; RegionOffset[11:0]} */
+#define CDU_REG_MATT						 0x101100
+/* [RW 1] when this bit is set the CDU operates in e1hmf mode */
+#define CDU_REG_MF_MODE 					 0x101050
+/* [R 1] indication that initializing the activity counter by the hardware
+   was done. */
+#define CFC_REG_AC_INIT_DONE					 0x104078
+/* [RW 13] activity counter ram access */
+#define CFC_REG_ACTIVITY_COUNTER				 0x104400
+#define CFC_REG_ACTIVITY_COUNTER_SIZE				 256
+/* [R 1] indication that initializing the cams by the hardware was done. */
+#define CFC_REG_CAM_INIT_DONE					 0x10407c
+/* [RW 2] Interrupt mask register #0 read/write */
+#define CFC_REG_CFC_INT_MASK					 0x104108
+/* [R 2] Interrupt register #0 read */
+#define CFC_REG_CFC_INT_STS					 0x1040fc
+/* [RC 2] Interrupt register #0 read clear */
+#define CFC_REG_CFC_INT_STS_CLR 				 0x104100
+/* [RW 4] Parity mask register #0 read/write */
+#define CFC_REG_CFC_PRTY_MASK					 0x104118
+/* [R 4] Parity register #0 read */
+#define CFC_REG_CFC_PRTY_STS					 0x10410c
+/* [RC 4] Parity register #0 read clear */
+#define CFC_REG_CFC_PRTY_STS_CLR				 0x104110
+/* [RW 21] CID cam access (21:1 - Data; valid - 0) */
+#define CFC_REG_CID_CAM 					 0x104800
+#define CFC_REG_CONTROL0					 0x104028
+#define CFC_REG_DEBUG0						 0x104050
+/* [RW 14] indicates per error (in #cfc_registers_cfc_error_vector.cfc_error
+   vector) whether the cfc should be disabled upon it */
+#define CFC_REG_DISABLE_ON_ERROR				 0x104044
+/* [RC 14] CFC error vector. when the CFC detects an internal error it will
+   set one of these bits. the bit description can be found in CFC
+   specifications */
+#define CFC_REG_ERROR_VECTOR					 0x10403c
+/* [WB 93] LCID info ram access */
+#define CFC_REG_INFO_RAM					 0x105000
+#define CFC_REG_INFO_RAM_SIZE					 1024
+#define CFC_REG_INIT_REG					 0x10404c
+#define CFC_REG_INTERFACES					 0x104058
+/* [RW 24] {weight_load_client7[2:0] to weight_load_client0[2:0]}. this
+   field allows changing the priorities of the weighted-round-robin arbiter
+   which selects which CFC load client should be served next */
+#define CFC_REG_LCREQ_WEIGHTS					 0x104084
+/* [RW 16] Link List ram access; data = {prev_lcid; next_lcid} */
+#define CFC_REG_LINK_LIST					 0x104c00
+#define CFC_REG_LINK_LIST_SIZE					 256
+/* [R 1] indication that initializing the link list by the hardware was done. */
+#define CFC_REG_LL_INIT_DONE					 0x104074
+/* [R 9] Number of allocated LCIDs which are at empty state */
+#define CFC_REG_NUM_LCIDS_ALLOC 				 0x104020
+/* [R 9] Number of Arriving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_ARRIVING				 0x104004
+#define CFC_REG_NUM_LCIDS_INSIDE_PF				 0x104120
+/* [R 9] Number of Leaving LCIDs in Link List Block */
+#define CFC_REG_NUM_LCIDS_LEAVING				 0x104018
+#define CFC_REG_WEAK_ENABLE_PF					 0x104124
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define CSDM_REG_AGG_INT_EVENT_0				 0xc2038
+#define CSDM_REG_AGG_INT_EVENT_10				 0xc2060
+#define CSDM_REG_AGG_INT_EVENT_11				 0xc2064
+#define CSDM_REG_AGG_INT_EVENT_12				 0xc2068
+#define CSDM_REG_AGG_INT_EVENT_13				 0xc206c
+#define CSDM_REG_AGG_INT_EVENT_14				 0xc2070
+#define CSDM_REG_AGG_INT_EVENT_15				 0xc2074
+#define CSDM_REG_AGG_INT_EVENT_16				 0xc2078
+#define CSDM_REG_AGG_INT_EVENT_2				 0xc2040
+#define CSDM_REG_AGG_INT_EVENT_3				 0xc2044
+#define CSDM_REG_AGG_INT_EVENT_4				 0xc2048
+#define CSDM_REG_AGG_INT_EVENT_5				 0xc204c
+#define CSDM_REG_AGG_INT_EVENT_6				 0xc2050
+#define CSDM_REG_AGG_INT_EVENT_7				 0xc2054
+#define CSDM_REG_AGG_INT_EVENT_8				 0xc2058
+#define CSDM_REG_AGG_INT_EVENT_9				 0xc205c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+   or auto-mask-mode (1) */
+#define CSDM_REG_AGG_INT_MODE_10				 0xc21e0
+#define CSDM_REG_AGG_INT_MODE_11				 0xc21e4
+#define CSDM_REG_AGG_INT_MODE_12				 0xc21e8
+#define CSDM_REG_AGG_INT_MODE_13				 0xc21ec
+#define CSDM_REG_AGG_INT_MODE_14				 0xc21f0
+#define CSDM_REG_AGG_INT_MODE_15				 0xc21f4
+#define CSDM_REG_AGG_INT_MODE_16				 0xc21f8
+#define CSDM_REG_AGG_INT_MODE_6 				 0xc21d0
+#define CSDM_REG_AGG_INT_MODE_7 				 0xc21d4
+#define CSDM_REG_AGG_INT_MODE_8 				 0xc21d8
+#define CSDM_REG_AGG_INT_MODE_9 				 0xc21dc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define CSDM_REG_CFC_RSP_START_ADDR				 0xc2008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define CSDM_REG_CMP_COUNTER_MAX0				 0xc201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define CSDM_REG_CMP_COUNTER_MAX1				 0xc2020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define CSDM_REG_CMP_COUNTER_MAX2				 0xc2024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define CSDM_REG_CMP_COUNTER_MAX3				 0xc2028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define CSDM_REG_CMP_COUNTER_START_ADDR 			 0xc200c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSDM_REG_CSDM_INT_MASK_0				 0xc229c
+#define CSDM_REG_CSDM_INT_MASK_1				 0xc22ac
+/* [R 32] Interrupt register #0 read */
+#define CSDM_REG_CSDM_INT_STS_0 				 0xc2290
+#define CSDM_REG_CSDM_INT_STS_1 				 0xc22a0
+/* [RW 11] Parity mask register #0 read/write */
+#define CSDM_REG_CSDM_PRTY_MASK 				 0xc22bc
+/* [R 11] Parity register #0 read */
+#define CSDM_REG_CSDM_PRTY_STS					 0xc22b0
+/* [RC 11] Parity register #0 read clear */
+#define CSDM_REG_CSDM_PRTY_STS_CLR				 0xc22b4
+#define CSDM_REG_ENABLE_IN1					 0xc2238
+#define CSDM_REG_ENABLE_IN2					 0xc223c
+#define CSDM_REG_ENABLE_OUT1					 0xc2240
+#define CSDM_REG_ENABLE_OUT2					 0xc2244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define CSDM_REG_INIT_CREDIT_PXP_CTRL				 0xc24bc
+/* [ST 32] The number of ACK after placement messages received */
+#define CSDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0xc227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define CSDM_REG_NUM_OF_PKT_END_MSG				 0xc2274
+/* [ST 32] The number of requests received from the pxp async if */
+#define CSDM_REG_NUM_OF_PXP_ASYNC_REQ				 0xc2278
+/* [ST 32] The number of commands received in queue 0 */
+#define CSDM_REG_NUM_OF_Q0_CMD					 0xc2248
+/* [ST 32] The number of commands received in queue 10 */
+#define CSDM_REG_NUM_OF_Q10_CMD 				 0xc226c
+/* [ST 32] The number of commands received in queue 11 */
+#define CSDM_REG_NUM_OF_Q11_CMD 				 0xc2270
+/* [ST 32] The number of commands received in queue 1 */
+#define CSDM_REG_NUM_OF_Q1_CMD					 0xc224c
+/* [ST 32] The number of commands received in queue 3 */
+#define CSDM_REG_NUM_OF_Q3_CMD					 0xc2250
+/* [ST 32] The number of commands received in queue 4 */
+#define CSDM_REG_NUM_OF_Q4_CMD					 0xc2254
+/* [ST 32] The number of commands received in queue 5 */
+#define CSDM_REG_NUM_OF_Q5_CMD					 0xc2258
+/* [ST 32] The number of commands received in queue 6 */
+#define CSDM_REG_NUM_OF_Q6_CMD					 0xc225c
+/* [ST 32] The number of commands received in queue 7 */
+#define CSDM_REG_NUM_OF_Q7_CMD					 0xc2260
+/* [ST 32] The number of commands received in queue 8 */
+#define CSDM_REG_NUM_OF_Q8_CMD					 0xc2264
+/* [ST 32] The number of commands received in queue 9 */
+#define CSDM_REG_NUM_OF_Q9_CMD					 0xc2268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define CSDM_REG_Q_COUNTER_START_ADDR				 0xc2010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define CSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0xc2548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_PARSER_EMPTY				 0xc2550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define CSDM_REG_SYNC_SYNC_EMPTY				 0xc2558
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~csdm_registers_timer_tick_enable.timer_tick_enable =1 */
+#define CSDM_REG_TIMER_TICK					 0xc2000
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define CSEM_REG_ARB_CYCLE_SIZE 				 0x200034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define CSEM_REG_ARB_ELEMENT0					 0x200020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~csem_registers_arb_element0.arb_element0 */
+#define CSEM_REG_ARB_ELEMENT1					 0x200024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~csem_registers_arb_element0.arb_element0
+   and ~csem_registers_arb_element1.arb_element1 */
+#define CSEM_REG_ARB_ELEMENT2					 0x200028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.Could
+   not be equal to register ~csem_registers_arb_element0.arb_element0 and
+   ~csem_registers_arb_element1.arb_element1 and
+   ~csem_registers_arb_element2.arb_element2 */
+#define CSEM_REG_ARB_ELEMENT3					 0x20002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Could not be equal to register ~csem_registers_arb_element0.arb_element0
+   and ~csem_registers_arb_element1.arb_element1 and
+   ~csem_registers_arb_element2.arb_element2 and
+   ~csem_registers_arb_element3.arb_element3 */
+#define CSEM_REG_ARB_ELEMENT4					 0x200030
+/* [RW 32] Interrupt mask register #0 read/write */
+#define CSEM_REG_CSEM_INT_MASK_0				 0x200110
+#define CSEM_REG_CSEM_INT_MASK_1				 0x200120
+/* [R 32] Interrupt register #0 read */
+#define CSEM_REG_CSEM_INT_STS_0 				 0x200104
+#define CSEM_REG_CSEM_INT_STS_1 				 0x200114
+/* [RW 32] Parity mask register #0 read/write */
+#define CSEM_REG_CSEM_PRTY_MASK_0				 0x200130
+#define CSEM_REG_CSEM_PRTY_MASK_1				 0x200140
+/* [R 32] Parity register #0 read */
+#define CSEM_REG_CSEM_PRTY_STS_0				 0x200124
+#define CSEM_REG_CSEM_PRTY_STS_1				 0x200134
+/* [RC 32] Parity register #0 read clear */
+#define CSEM_REG_CSEM_PRTY_STS_CLR_0				 0x200128
+#define CSEM_REG_CSEM_PRTY_STS_CLR_1				 0x200138
+#define CSEM_REG_ENABLE_IN					 0x2000a4
+#define CSEM_REG_ENABLE_OUT					 0x2000a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define CSEM_REG_FAST_MEMORY					 0x220000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define CSEM_REG_FIC0_DISABLE					 0x200224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define CSEM_REG_FIC1_DISABLE					 0x200234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define CSEM_REG_INT_TABLE					 0x200400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define CSEM_REG_MSG_NUM_FIC0					 0x200000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define CSEM_REG_MSG_NUM_FIC1					 0x200004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define CSEM_REG_MSG_NUM_FOC0					 0x200008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define CSEM_REG_MSG_NUM_FOC1					 0x20000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define CSEM_REG_MSG_NUM_FOC2					 0x200010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define CSEM_REG_MSG_NUM_FOC3					 0x200014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define CSEM_REG_PAS_DISABLE					 0x20024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define CSEM_REG_PASSIVE_BUFFER 				 0x202000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define CSEM_REG_PRAM						 0x240000
+/* [R 16] Valid sleeping threads indication have bit per thread */
+#define CSEM_REG_SLEEP_THREADS_VALID				 0x20026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define CSEM_REG_SLOW_EXT_STORE_EMPTY				 0x2002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define CSEM_REG_THREADS_LIST					 0x2002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define CSEM_REG_TS_0_AS					 0x200038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define CSEM_REG_TS_10_AS					 0x200060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define CSEM_REG_TS_11_AS					 0x200064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define CSEM_REG_TS_12_AS					 0x200068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define CSEM_REG_TS_13_AS					 0x20006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define CSEM_REG_TS_14_AS					 0x200070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define CSEM_REG_TS_15_AS					 0x200074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define CSEM_REG_TS_16_AS					 0x200078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define CSEM_REG_TS_17_AS					 0x20007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define CSEM_REG_TS_18_AS					 0x200080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define CSEM_REG_TS_1_AS					 0x20003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define CSEM_REG_TS_2_AS					 0x200040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define CSEM_REG_TS_3_AS					 0x200044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define CSEM_REG_TS_4_AS					 0x200048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define CSEM_REG_TS_5_AS					 0x20004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define CSEM_REG_TS_6_AS					 0x200050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define CSEM_REG_TS_7_AS					 0x200054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define CSEM_REG_TS_8_AS					 0x200058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define CSEM_REG_TS_9_AS					 0x20005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define CSEM_REG_VFPF_ERR_NUM					 0x200380
+/* [RW 1] Parity mask register #0 read/write */
+#define DBG_REG_DBG_PRTY_MASK					 0xc0a8
+/* [R 1] Parity register #0 read */
+#define DBG_REG_DBG_PRTY_STS					 0xc09c
+/* [RC 1] Parity register #0 read clear */
+#define DBG_REG_DBG_PRTY_STS_CLR				 0xc0a0
+/* [RW 1] When set the DMAE will process the commands as in E1.5. 1. The
+ * function that is used is always SRC-PCI; 2. VF_Valid = 0; 3. VFID=0;
+ * 4. Completion function=0; 5. Error handling=0 */
+#define DMAE_REG_BACKWARD_COMP_EN				 0x10207c
+/* [RW 32] Commands memory. The address of command X; row Y is calculated
+   as 14*X+Y. */
+#define DMAE_REG_CMD_MEM					 0x102400
+#define DMAE_REG_CMD_MEM_SIZE					 224
+/* [RW 1] If 0 - the CRC-16c initial value is all zeroes; if 1 - the CRC-16c
+   initial value is all ones. */
+#define DMAE_REG_CRC16C_INIT					 0x10201c
+/* [RW 1] If 0 - the CRC-16 T10 initial value is all zeroes; if 1 - the
+   CRC-16 T10 initial value is all ones. */
+#define DMAE_REG_CRC16T10_INIT					 0x102020
+/* [RW 2] Interrupt mask register #0 read/write */
+#define DMAE_REG_DMAE_INT_MASK					 0x102054
+/* [RW 4] Parity mask register #0 read/write */
+#define DMAE_REG_DMAE_PRTY_MASK 				 0x102064
+/* [R 4] Parity register #0 read */
+#define DMAE_REG_DMAE_PRTY_STS					 0x102058
+/* [RC 4] Parity register #0 read clear */
+#define DMAE_REG_DMAE_PRTY_STS_CLR				 0x10205c
+/* [RW 1] Command 0 go. */
+#define DMAE_REG_GO_C0						 0x102080
+/* [RW 1] Command 1 go. */
+#define DMAE_REG_GO_C1						 0x102084
+/* [RW 1] Command 10 go. */
+#define DMAE_REG_GO_C10 					 0x102088
+/* [RW 1] Command 11 go. */
+#define DMAE_REG_GO_C11 					 0x10208c
+/* [RW 1] Command 12 go. */
+#define DMAE_REG_GO_C12 					 0x102090
+/* [RW 1] Command 13 go. */
+#define DMAE_REG_GO_C13 					 0x102094
+/* [RW 1] Command 14 go. */
+#define DMAE_REG_GO_C14 					 0x102098
+/* [RW 1] Command 15 go. */
+#define DMAE_REG_GO_C15 					 0x10209c
+/* [RW 1] Command 2 go. */
+#define DMAE_REG_GO_C2						 0x1020a0
+/* [RW 1] Command 3 go. */
+#define DMAE_REG_GO_C3						 0x1020a4
+/* [RW 1] Command 4 go. */
+#define DMAE_REG_GO_C4						 0x1020a8
+/* [RW 1] Command 5 go. */
+#define DMAE_REG_GO_C5						 0x1020ac
+/* [RW 1] Command 6 go. */
+#define DMAE_REG_GO_C6						 0x1020b0
+/* [RW 1] Command 7 go. */
+#define DMAE_REG_GO_C7						 0x1020b4
+/* [RW 1] Command 8 go. */
+#define DMAE_REG_GO_C8						 0x1020b8
+/* [RW 1] Command 9 go. */
+#define DMAE_REG_GO_C9						 0x1020bc
+/* [RW 1] DMAE GRC Interface (Target; Master) enable. If 0 - the acknowledge
+   input is disregarded; valid is deasserted; all other signals are treated
+   as usual; if 1 - normal activity. */
+#define DMAE_REG_GRC_IFEN					 0x102008
+/* [RW 1] DMAE PCI Interface (Request; Read; Write) enable. If 0 - the
+   acknowledge input is disregarded; valid is deasserted; full is asserted;
+   all other signals are treated as usual; if 1 - normal activity. */
+#define DMAE_REG_PCI_IFEN					 0x102004
+/* [RW 4] DMAE- PCI Request Interface initial credit. Write writes the
+   initial value to the credit counter; related to the address. Read returns
+   the current value of the counter. */
+#define DMAE_REG_PXP_REQ_INIT_CRD				 0x1020c0
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD0					 0x170060
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD1					 0x170064
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD2					 0x170068
+/* [RW 8] Aggregation command. */
+#define DORQ_REG_AGG_CMD3					 0x17006c
+/* [RW 28] UCM Header. */
+#define DORQ_REG_CMHEAD_RX					 0x170050
+/* [RW 32] Doorbell address for RBC doorbells (function 0). */
+#define DORQ_REG_DB_ADDR0					 0x17008c
+/* [RW 5] Interrupt mask register #0 read/write */
+#define DORQ_REG_DORQ_INT_MASK					 0x170180
+/* [R 5] Interrupt register #0 read */
+#define DORQ_REG_DORQ_INT_STS					 0x170174
+/* [RC 5] Interrupt register #0 read clear */
+#define DORQ_REG_DORQ_INT_STS_CLR				 0x170178
+/* [RW 2] Parity mask register #0 read/write */
+#define DORQ_REG_DORQ_PRTY_MASK 				 0x170190
+/* [R 2] Parity register #0 read */
+#define DORQ_REG_DORQ_PRTY_STS					 0x170184
+/* [RC 2] Parity register #0 read clear */
+#define DORQ_REG_DORQ_PRTY_STS_CLR				 0x170188
+/* [RW 8] The address to write the DPM CID to STORM. */
+#define DORQ_REG_DPM_CID_ADDR					 0x170044
+/* [RW 5] The DPM mode CID extraction offset. */
+#define DORQ_REG_DPM_CID_OFST					 0x170030
+/* [RW 12] The threshold of the DQ FIFO to send the almost full interrupt. */
+#define DORQ_REG_DQ_FIFO_AFULL_TH				 0x17007c
+/* [RW 12] The threshold of the DQ FIFO to send the full interrupt. */
+#define DORQ_REG_DQ_FIFO_FULL_TH				 0x170078
+/* [R 13] Current value of the DQ FIFO fill level according to the
+   following pointer. The range is 0 - 256 FIFO rows; where each row stands
+   for one doorbell. */
+#define DORQ_REG_DQ_FILL_LVLF					 0x1700a4
+/* [R 1] DQ FIFO full status. Set when the FIFO fill level is greater than
+   or equal to the full threshold; reset on full clear. */
+#define DORQ_REG_DQ_FULL_ST					 0x1700c0
+/* [RW 28] The value sent to CM header in the case of CFC load error. */
+#define DORQ_REG_ERR_CMHEAD					 0x170058
+#define DORQ_REG_IF_EN						 0x170004
+#define DORQ_REG_MODE_ACT					 0x170008
+/* [RW 5] The normal mode CID extraction offset. */
+#define DORQ_REG_NORM_CID_OFST					 0x17002c
+/* [RW 28] TCM Header when only TCP context is loaded. */
+#define DORQ_REG_NORM_CMHEAD_TX 				 0x17004c
+/* [RW 3] The number of simultaneous outstanding requests to Context Fetch
+   Interface. */
+#define DORQ_REG_OUTST_REQ					 0x17003c
+#define DORQ_REG_PF_USAGE_CNT					 0x1701d0
+#define DORQ_REG_REGN						 0x170038
+/* [R 4] Current value of response A counter credit. Initial credit is
+   configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+   register. */
+#define DORQ_REG_RSPA_CRD_CNT					 0x1700ac
+/* [R 4] Current value of response B counter credit. Initial credit is
+   configured through write to ~dorq_registers_rsp_init_crd.rsp_init_crd
+   register. */
+#define DORQ_REG_RSPB_CRD_CNT					 0x1700b0
+/* [RW 4] The initial credit at the Doorbell Response Interface. The write
+   writes the same initial credit to the rspa_crd_cnt and rspb_crd_cnt. The
+   read reads this written value. */
+#define DORQ_REG_RSP_INIT_CRD					 0x170048
+/* [RW 4] Initial activity counter value on the load request; when the
+   shortcut is done. */
+#define DORQ_REG_SHRT_ACT_CNT					 0x170070
+/* [RW 28] TCM Header when both ULP and TCP context is loaded. */
+#define DORQ_REG_SHRT_CMHEAD					 0x170054
+#define HC_CONFIG_0_REG_ATTN_BIT_EN_0				 (0x1<<4)
+#define HC_CONFIG_0_REG_BLOCK_DISABLE_0				 (0x1<<0)
+#define HC_CONFIG_0_REG_INT_LINE_EN_0				 (0x1<<3)
+#define HC_CONFIG_0_REG_MSI_ATTN_EN_0				 (0x1<<7)
+#define HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0			 (0x1<<2)
+#define HC_CONFIG_0_REG_SINGLE_ISR_EN_0				 (0x1<<1)
+#define HC_CONFIG_1_REG_BLOCK_DISABLE_1				 (0x1<<0)
+#define HC_REG_AGG_INT_0					 0x108050
+#define HC_REG_AGG_INT_1					 0x108054
+#define HC_REG_ATTN_BIT 					 0x108120
+#define HC_REG_ATTN_IDX 					 0x108100
+#define HC_REG_ATTN_MSG0_ADDR_L 				 0x108018
+#define HC_REG_ATTN_MSG1_ADDR_L 				 0x108020
+#define HC_REG_ATTN_NUM_P0					 0x108038
+#define HC_REG_ATTN_NUM_P1					 0x10803c
+#define HC_REG_COMMAND_REG					 0x108180
+#define HC_REG_CONFIG_0 					 0x108000
+#define HC_REG_CONFIG_1 					 0x108004
+#define HC_REG_FUNC_NUM_P0					 0x1080ac
+#define HC_REG_FUNC_NUM_P1					 0x1080b0
+/* [RW 3] Parity mask register #0 read/write */
+#define HC_REG_HC_PRTY_MASK					 0x1080a0
+/* [R 3] Parity register #0 read */
+#define HC_REG_HC_PRTY_STS					 0x108094
+/* [RC 3] Parity register #0 read clear */
+#define HC_REG_HC_PRTY_STS_CLR					 0x108098
+#define HC_REG_INT_MASK						 0x108108
+#define HC_REG_LEADING_EDGE_0					 0x108040
+#define HC_REG_LEADING_EDGE_1					 0x108048
+#define HC_REG_MAIN_MEMORY					 0x108800
+#define HC_REG_MAIN_MEMORY_SIZE					 152
+#define HC_REG_P0_PROD_CONS					 0x108200
+#define HC_REG_P1_PROD_CONS					 0x108400
+#define HC_REG_PBA_COMMAND					 0x108140
+#define HC_REG_PCI_CONFIG_0					 0x108010
+#define HC_REG_PCI_CONFIG_1					 0x108014
+#define HC_REG_STATISTIC_COUNTERS				 0x109000
+#define HC_REG_TRAILING_EDGE_0					 0x108044
+#define HC_REG_TRAILING_EDGE_1					 0x10804c
+#define HC_REG_UC_RAM_ADDR_0					 0x108028
+#define HC_REG_UC_RAM_ADDR_1					 0x108030
+#define HC_REG_USTORM_ADDR_FOR_COALESCE 			 0x108068
+#define HC_REG_VQID_0						 0x108008
+#define HC_REG_VQID_1						 0x10800c
+#define IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN		 (0x1<<1)
+#define IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE		 (0x1<<0)
+#define IGU_REG_ATTENTION_ACK_BITS				 0x130108
+/* [R 4] Debug: attn_fsm */
+#define IGU_REG_ATTN_FSM					 0x130054
+#define IGU_REG_ATTN_MSG_ADDR_H				 0x13011c
+#define IGU_REG_ATTN_MSG_ADDR_L				 0x130120
+/* [R 4] Debug: [3] - attention write done message is pending (0-no pending;
+ * 1-pending). [2:0] = PFID. Pending means the attention message was sent;
+ * but write done was not received. */
+#define IGU_REG_ATTN_WRITE_DONE_PENDING			 0x130030
+#define IGU_REG_BLOCK_CONFIGURATION				 0x130000
+#define IGU_REG_COMMAND_REG_32LSB_DATA				 0x130124
+#define IGU_REG_COMMAND_REG_CTRL				 0x13012c
+/* [WB_R 32] Cleanup bit status per SB. 1 = cleanup is set. 0 = cleanup bit
+ * is clear. The bits in this registers are set and clear via the producer
+ * command. Data valid only in addresses 0-4. all the rest are zero. */
+#define IGU_REG_CSTORM_TYPE_0_SB_CLEANUP			 0x130200
+/* [R 5] Debug: ctrl_fsm */
+#define IGU_REG_CTRL_FSM					 0x130064
+/* [R 1] data available for error memory. If this bit is clear do not read
+ * from error_handling_memory. */
+#define IGU_REG_ERROR_HANDLING_DATA_VALID			 0x130130
+/* [RW 11] Parity mask register #0 read/write */
+#define IGU_REG_IGU_PRTY_MASK					 0x1300a8
+/* [R 11] Parity register #0 read */
+#define IGU_REG_IGU_PRTY_STS					 0x13009c
+/* [RC 11] Parity register #0 read clear */
+#define IGU_REG_IGU_PRTY_STS_CLR				 0x1300a0
+/* [R 4] Debug: int_handle_fsm */
+#define IGU_REG_INT_HANDLE_FSM					 0x130050
+#define IGU_REG_LEADING_EDGE_LATCH				 0x130134
+/* [RW 14] mapping CAM; relevant for E2 operating mode only. [0] - valid.
+ * [6:1] - vector number; [13:7] - FID (if VF - [13] = 0; [12:7] = VF
+ * number; if PF - [13] = 1; [12:10] = 0; [9:7] = PF number); */
+#define IGU_REG_MAPPING_MEMORY					 0x131000
+#define IGU_REG_MAPPING_MEMORY_SIZE				 136
+#define IGU_REG_PBA_STATUS_LSB					 0x130138
+#define IGU_REG_PBA_STATUS_MSB					 0x13013c
+#define IGU_REG_PCI_PF_MSI_EN					 0x130140
+#define IGU_REG_PCI_PF_MSIX_EN					 0x130144
+#define IGU_REG_PCI_PF_MSIX_FUNC_MASK				 0x130148
+/* [WB_R 32] Each bit represent the pending bits status for that SB. 0 = no
+ * pending; 1 = pending. Pending means the interrupt was asserted; and write
+ * done was not received. Data valid only in addresses 0-4. all the rest are
+ * zero. */
+#define IGU_REG_PENDING_BITS_STATUS				 0x130300
+#define IGU_REG_PF_CONFIGURATION				 0x130154
+/* [RW 20] producers only. E2 mode: address 0-135 match to the mapping
+ * memory; 136 - PF0 default prod; 137 PF1 default prod; 138 - PF2 default
+ * prod; 139 PF3 default prod; 140 - PF0 - ATTN prod; 141 - PF1 - ATTN prod;
+ * 142 - PF2 - ATTN prod; 143 - PF3 - ATTN prod; 144-147 reserved. E1.5 mode
+ * - In backward compatible mode; for non default SB; each even line in the
+ * memory holds the U producer and each odd line hold the C producer. The
+ * first 128 producers are for NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The
+ * last 20 producers are for the DSB for each PF. Each PF has five segments
+ * (the order inside each segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
+ * 132-135 C prods; 136-139 X prods; 140-143 T prods; 144-147 ATTN prods; */
+#define IGU_REG_PROD_CONS_MEMORY				 0x132000
+/* [R 3] Debug: pxp_arb_fsm */
+#define IGU_REG_PXP_ARB_FSM					 0x130068
+/* [RW 6] Writing a one to a bit will reset the appropriate memory. When the
+ * memory reset is finished the appropriate bit will be cleared. Bit 0 - mapping
+ * memory; Bit 1 - SB memory; Bit 2 - SB interrupt and mask register; Bit 3
+ * - MSIX memory; Bit 4 - PBA memory; Bit 5 - statistics; */
+#define IGU_REG_RESET_MEMORIES					 0x130158
+/* [R 4] Debug: sb_ctrl_fsm */
+#define IGU_REG_SB_CTRL_FSM					 0x13004c
+#define IGU_REG_SB_INT_BEFORE_MASK_LSB				 0x13015c
+#define IGU_REG_SB_INT_BEFORE_MASK_MSB				 0x130160
+#define IGU_REG_SB_MASK_LSB					 0x130164
+#define IGU_REG_SB_MASK_MSB					 0x130168
+/* [RW 16] Number of commands that were dropped without causing an interrupt
+ * due to: read access for WO BAR address; or write access for RO BAR
+ * address or any access for reserved address or PCI function error is set
+ * and address is not MSIX; PBA or cleanup */
+#define IGU_REG_SILENT_DROP					 0x13016c
+/* [RW 10] Number of MSI/MSIX/ATTN messages sent for the function: 0-63 -
+ * number of MSIX messages per VF; 64-67 - number of MSI/MSIX messages per
+ * PF; 68-71 number of ATTN messages per PF */
+#define IGU_REG_STATISTIC_NUM_MESSAGE_SENT			 0x130800
+/* [RW 32] Number of cycles the timer mask masks the IGU interrupt when a
+ * timer mask command arrives. Value must be bigger than 100. */
+#define IGU_REG_TIMER_MASKING_VALUE				 0x13003c
+#define IGU_REG_TRAILING_EDGE_LATCH				 0x130104
+#define IGU_REG_VF_CONFIGURATION				 0x130170
+/* [WB_R 32] Each bit represent write done pending bits status for that SB
+ * (MSI/MSIX message was sent and write done was not received yet). 0 =
+ * clear; 1 = set. Data valid only in addresses 0-4. all the rest are zero. */
+#define IGU_REG_WRITE_DONE_PENDING				 0x130480
+#define MCP_A_REG_MCPR_SCRATCH					 0x3a0000
+#define MCP_REG_MCPR_CPU_PROGRAM_COUNTER			 0x8501c
+#define MCP_REG_MCPR_GP_INPUTS					 0x800c0
+#define MCP_REG_MCPR_GP_OENABLE					 0x800c8
+#define MCP_REG_MCPR_GP_OUTPUTS					 0x800c4
+#define MCP_REG_MCPR_IMC_COMMAND				 0x85900
+#define MCP_REG_MCPR_IMC_DATAREG0				 0x85920
+#define MCP_REG_MCPR_IMC_SLAVE_CONTROL				 0x85904
+#define MCP_REG_MCPR_NVM_ACCESS_ENABLE				 0x86424
+#define MCP_REG_MCPR_NVM_ADDR					 0x8640c
+#define MCP_REG_MCPR_NVM_CFG4					 0x8642c
+#define MCP_REG_MCPR_NVM_COMMAND				 0x86400
+#define MCP_REG_MCPR_NVM_READ					 0x86410
+#define MCP_REG_MCPR_NVM_SW_ARB 				 0x86420
+#define MCP_REG_MCPR_NVM_WRITE					 0x86408
+#define MCP_REG_MCPR_SCRATCH					 0xa0000
+#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK		 (0x1<<1)
+#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK		 (0x1<<0)
+/* [R 32] read first 32 bit after inversion of function 0. mapped as
+   follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
+   [6] GPIO1 function 1; [7] GPIO2 function 1; [8] GPIO3 function 1; [9]
+   GPIO4 function 1; [10] PCIE glue/PXP VPD event function0; [11] PCIE
+   glue/PXP VPD event function1; [12] PCIE glue/PXP Expansion ROM event0;
+   [13] PCIE glue/PXP Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16]
+   MSI/X indication for mcp; [17] MSI/X indication for function 1; [18] BRB
+   Parity error; [19] BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw
+   interrupt; [22] SRC Parity error; [23] SRC Hw interrupt; [24] TSDM Parity
+   error; [25] TSDM Hw interrupt; [26] TCM Parity error; [27] TCM Hw
+   interrupt; [28] TSEMI Parity error; [29] TSEMI Hw interrupt; [30] PBF
+   Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_0			 0xa42c
+#define MISC_REG_AEU_AFTER_INVERT_1_FUNC_1			 0xa430
+/* [R 32] read first 32 bit after inversion of mcp. mapped as follows: [0]
+   NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+   mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+   [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+   PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+   function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+   Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+   mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+   BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+   Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+   interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+   Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+   interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_1_MCP 			 0xa434
+/* [R 32] read second 32 bit after inversion of function 0. mapped as
+   follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_0			 0xa438
+#define MISC_REG_AEU_AFTER_INVERT_2_FUNC_1			 0xa43c
+/* [R 32] read second 32 bit after inversion of mcp. mapped as follows: [0]
+   PBClient Parity error; [1] PBClient Hw interrupt; [2] QM Parity error;
+   [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw interrupt;
+   [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9]
+   XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+   DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+   error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+   PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+   [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+   [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+   [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+   [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_AFTER_INVERT_2_MCP 			 0xa440
+/* [R 32] read third 32 bit after inversion of function 0. mapped as
+   follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity
+   error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error; [5]
+   PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_0			 0xa444
+#define MISC_REG_AEU_AFTER_INVERT_3_FUNC_1			 0xa448
+/* [R 32] read third 32 bit after inversion of mcp. mapped as follows: [0]
+   CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP Parity error; [3] PXP
+   Hw interrupt; [4] PXPpciClockClient Parity error; [5] PXPpciClockClient
+   Hw interrupt; [6] CFC Parity error; [7] CFC Hw interrupt; [8] CDU Parity
+   error; [9] CDU Hw interrupt; [10] DMAE Parity error; [11] DMAE Hw
+   interrupt; [12] IGU (HC) Parity error; [13] IGU (HC) Hw interrupt; [14]
+   MISC Parity error; [15] MISC Hw interrupt; [16] pxp_misc_mps_attn; [17]
+   Flash event; [18] SMB event; [19] MCP attn0; [20] MCP attn1; [21] SW
+   timers attn_1 func0; [22] SW timers attn_2 func0; [23] SW timers attn_3
+   func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW timers attn_1
+   func1; [27] SW timers attn_2 func1; [28] SW timers attn_3 func1; [29] SW
+   timers attn_4 func1; [30] General attn0; [31] General attn1; */
+#define MISC_REG_AEU_AFTER_INVERT_3_MCP 			 0xa44c
+/* [R 32] read fourth 32 bit after inversion of function 0. mapped as
+   follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_0			 0xa450
+#define MISC_REG_AEU_AFTER_INVERT_4_FUNC_1			 0xa454
+/* [R 32] read fourth 32 bit after inversion of mcp. mapped as follows: [0]
+   General attn2; [1] General attn3; [2] General attn4; [3] General attn5;
+   [4] General attn6; [5] General attn7; [6] General attn8; [7] General
+   attn9; [8] General attn10; [9] General attn11; [10] General attn12; [11]
+   General attn13; [12] General attn14; [13] General attn15; [14] General
+   attn16; [15] General attn17; [16] General attn18; [17] General attn19;
+   [18] General attn20; [19] General attn21; [20] Main power interrupt; [21]
+   RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN Latched attn; [24]
+   RBCU Latched attn; [25] RBCP Latched attn; [26] GRC Latched timeout
+   attention; [27] GRC Latched reserved access attention; [28] MCP Latched
+   rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP Latched
+   ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_AFTER_INVERT_4_MCP 			 0xa458
+/* [R 32] Read fifth 32 bit after inversion of function 0. Mapped as
+ * follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC
+ * attention; [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6]
+ * CNIG attention (reserved); [7] CNIG parity (reserved); [31-8] Reserved; */
+#define MISC_REG_AEU_AFTER_INVERT_5_FUNC_0			 0xa700
+/* [W 14] writes to this register clear the latched signals; one in d0
+   clears RBCR latch; one in d1 clears RBCT latch; one in d2 clears RBCN
+   latch; one in d3 clears RBCU latch; one in d4 clears RBCP latch; one in
+   d5 clears GRC Latched timeout attention; one in d6 clears GRC Latched
+   reserved access attention; one in d7 clears Latched rom_parity; one in
+   d8 clears Latched ump_rx_parity; one in d9 clears Latched ump_tx_parity;
+   one in d10 clears Latched scpad_parity (both ports); one in d11 clears
+   pxpv_misc_mps_attn; one in d12 clears pxp_misc_exp_rom_attn0; one in d13
+   clears pxp_misc_exp_rom_attn1; reads from this register return zero */
+#define MISC_REG_AEU_CLR_LATCH_SIGNAL				 0xa45c
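+/* Illustrative sketch (not part of this file): clearing the GRC Latched
+ * timeout attention (d5) and the Latched rom_parity (d7) through the
+ * write-one-to-clear semantics described above; REG_WR() is assumed to be
+ * the driver's register write accessor:
+ *
+ *	REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, (1 << 5) | (1 << 7));
+ */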
+/* [RW 32] first 32b for enabling the output for function 0 output0. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+   0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0			 0xa06c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1			 0xa07c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2			 0xa08c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_3			 0xa09c
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_5			 0xa0bc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_6			 0xa0cc
+#define MISC_REG_AEU_ENABLE1_FUNC_0_OUT_7			 0xa0dc
+/* [RW 32] first 32b for enabling the output for function 1 output0. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 1; [3] GPIO2 function 1; [4] GPIO3 function
+   1; [5] GPIO4 function 1; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 1; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0			 0xa10c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1			 0xa11c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2			 0xa12c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_3			 0xa13c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_5			 0xa15c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_6			 0xa16c
+#define MISC_REG_AEU_ENABLE1_FUNC_1_OUT_7			 0xa17c
+/* [RW 32] first 32b for enabling the output for close the gate nig. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+   0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_NIG_0				 0xa0ec
+#define MISC_REG_AEU_ENABLE1_NIG_1				 0xa18c
+/* [RW 32] first 32b for enabling the output for close the gate pxp. mapped
+   as follows: [0] NIG attention for function0; [1] NIG attention for
+   function1; [2] GPIO1 function 0; [3] GPIO2 function 0; [4] GPIO3 function
+   0; [5] GPIO4 function 0; [6] GPIO1 function 1; [7] GPIO2 function 1; [8]
+   GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for function 0; [17] MSI/X
+   indication for function 1; [18] BRB Parity error; [19] BRB Hw interrupt;
+   [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23]
+   SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26]
+   TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29]
+   TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_ENABLE1_PXP_0				 0xa0fc
+#define MISC_REG_AEU_ENABLE1_PXP_1				 0xa19c
+/* [RW 32] second 32b for enabling the output for function 0 output0. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_0			 0xa070
+#define MISC_REG_AEU_ENABLE2_FUNC_0_OUT_1			 0xa080
+/* [RW 32] second 32b for enabling the output for function 1 output0. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_0			 0xa110
+#define MISC_REG_AEU_ENABLE2_FUNC_1_OUT_1			 0xa120
+/* [RW 32] second 32b for enabling the output for close the gate nig. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_NIG_0				 0xa0f0
+#define MISC_REG_AEU_ENABLE2_NIG_1				 0xa190
+/* [RW 32] second 32b for enabling the output for close the gate pxp. mapped
+   as follows: [0] PBClient Parity error; [1] PBClient Hw interrupt; [2] QM
+   Parity error; [3] QM Hw interrupt; [4] Timers Parity error; [5] Timers Hw
+   interrupt; [6] XSDM Parity error; [7] XSDM Hw interrupt; [8] XCM Parity
+   error; [9] XCM Hw interrupt; [10] XSEMI Parity error; [11] XSEMI Hw
+   interrupt; [12] DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14]
+   NIG Parity error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error;
+   [17] Vaux PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw
+   interrupt; [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM
+   Parity error; [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI
+   Hw interrupt; [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM
+   Parity error; [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw
+   interrupt; */
+#define MISC_REG_AEU_ENABLE2_PXP_0				 0xa100
+#define MISC_REG_AEU_ENABLE2_PXP_1				 0xa1a0
+/* [RW 32] third 32b for enabling the output for function 0 output0. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_0			 0xa074
+#define MISC_REG_AEU_ENABLE3_FUNC_0_OUT_1			 0xa084
+/* [RW 32] third 32b for enabling the output for function 1 output0. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_0			 0xa114
+#define MISC_REG_AEU_ENABLE3_FUNC_1_OUT_1			 0xa124
+/* [RW 32] third 32b for enabling the output for close the gate nig. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_NIG_0				 0xa0f4
+#define MISC_REG_AEU_ENABLE3_NIG_1				 0xa194
+/* [RW 32] third 32b for enabling the output for close the gate pxp. mapped
+   as follows: [0] CSEMI Parity error; [1] CSEMI Hw interrupt; [2] PXP
+   Parity error; [3] PXP Hw interrupt; [4] PXPpciClockClient Parity error;
+   [5] PXPpciClockClient Hw interrupt; [6] CFC Parity error; [7] CFC Hw
+   interrupt; [8] CDU Parity error; [9] CDU Hw interrupt; [10] DMAE Parity
+   error; [11] DMAE Hw interrupt; [12] IGU (HC) Parity error; [13] IGU (HC)
+   Hw interrupt; [14] MISC Parity error; [15] MISC Hw interrupt; [16]
+   pxp_misc_mps_attn; [17] Flash event; [18] SMB event; [19] MCP attn0; [20]
+   MCP attn1; [21] SW timers attn_1 func0; [22] SW timers attn_2 func0; [23]
+   SW timers attn_3 func0; [24] SW timers attn_4 func0; [25] PERST; [26] SW
+   timers attn_1 func1; [27] SW timers attn_2 func1; [28] SW timers attn_3
+   func1; [29] SW timers attn_4 func1; [30] General attn0; [31] General
+   attn1; */
+#define MISC_REG_AEU_ENABLE3_PXP_0				 0xa104
+#define MISC_REG_AEU_ENABLE3_PXP_1				 0xa1a4
+/* [RW 32] fourth 32b for enabling the output for function 0 output0. mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0			 0xa078
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_2			 0xa098
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_4			 0xa0b8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_5			 0xa0c8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_6			 0xa0d8
+#define MISC_REG_AEU_ENABLE4_FUNC_0_OUT_7			 0xa0e8
+/* [RW 32] fourth 32b for enabling the output for function 1 output0. mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0			 0xa118
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_2			 0xa138
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_4			 0xa158
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_5			 0xa168
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_6			 0xa178
+#define MISC_REG_AEU_ENABLE4_FUNC_1_OUT_7			 0xa188
+/* [RW 32] fourth 32b for enabling the output for close the gate nig. mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_NIG_0				 0xa0f8
+#define MISC_REG_AEU_ENABLE4_NIG_1				 0xa198
+/* [RW 32] fourth 32b for enabling the output for close the gate pxp. mapped
+   as follows: [0] General attn2; [1] General attn3; [2] General attn4; [3]
+   General attn5; [4] General attn6; [5] General attn7; [6] General attn8;
+   [7] General attn9; [8] General attn10; [9] General attn11; [10] General
+   attn12; [11] General attn13; [12] General attn14; [13] General attn15;
+   [14] General attn16; [15] General attn17; [16] General attn18; [17]
+   General attn19; [18] General attn20; [19] General attn21; [20] Main power
+   interrupt; [21] RBCR Latched attn; [22] RBCT Latched attn; [23] RBCN
+   Latched attn; [24] RBCU Latched attn; [25] RBCP Latched attn; [26] GRC
+   Latched timeout attention; [27] GRC Latched reserved access attention;
+   [28] MCP Latched rom_parity; [29] MCP Latched ump_rx_parity; [30] MCP
+   Latched ump_tx_parity; [31] MCP Latched scpad_parity; */
+#define MISC_REG_AEU_ENABLE4_PXP_0				 0xa108
+#define MISC_REG_AEU_ENABLE4_PXP_1				 0xa1a8
+/* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu
+   128 bit vector */
+#define MISC_REG_AEU_GENERAL_ATTN_0				 0xa000
+#define MISC_REG_AEU_GENERAL_ATTN_1				 0xa004
+#define MISC_REG_AEU_GENERAL_ATTN_10				 0xa028
+#define MISC_REG_AEU_GENERAL_ATTN_11				 0xa02c
+#define MISC_REG_AEU_GENERAL_ATTN_12				 0xa030
+#define MISC_REG_AEU_GENERAL_ATTN_2				 0xa008
+#define MISC_REG_AEU_GENERAL_ATTN_3				 0xa00c
+#define MISC_REG_AEU_GENERAL_ATTN_4				 0xa010
+#define MISC_REG_AEU_GENERAL_ATTN_5				 0xa014
+#define MISC_REG_AEU_GENERAL_ATTN_6				 0xa018
+#define MISC_REG_AEU_GENERAL_ATTN_7				 0xa01c
+#define MISC_REG_AEU_GENERAL_ATTN_8				 0xa020
+#define MISC_REG_AEU_GENERAL_ATTN_9				 0xa024
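+/* Illustrative sketch (not part of this file): each GENERAL_ATTN register
+ * drives one general attention bit of the 128-bit AEU vector; writing 1 is
+ * assumed to assert the attention and writing 0 to deassert it, per the
+ * set/clr description above (REG_WR() is the driver's accessor). Raise
+ * general attention 12, then clear it again:
+ *
+ *	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12, 1);
+ *	REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12, 0);
+ */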
+#define MISC_REG_AEU_GENERAL_MASK				 0xa61c
+/* [RW 32] first 32b for inverting the input for function 0; for each bit:
+   0= do not invert; 1= invert; mapped as follows: [0] NIG attention for
+   function0; [1] NIG attention for function1; [2] GPIO1 mcp; [3] GPIO2 mcp;
+   [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1; [7] GPIO2 function 1;
+   [8] GPIO3 function 1; [9] GPIO4 function 1; [10] PCIE glue/PXP VPD event
+   function0; [11] PCIE glue/PXP VPD event function1; [12] PCIE glue/PXP
+   Expansion ROM event0; [13] PCIE glue/PXP Expansion ROM event1; [14]
+   SPIO4; [15] SPIO5; [16] MSI/X indication for mcp; [17] MSI/X indication
+   for function 1; [18] BRB Parity error; [19] BRB Hw interrupt; [20] PRS
+   Parity error; [21] PRS Hw interrupt; [22] SRC Parity error; [23] SRC Hw
+   interrupt; [24] TSDM Parity error; [25] TSDM Hw interrupt; [26] TCM
+   Parity error; [27] TCM Hw interrupt; [28] TSEMI Parity error; [29] TSEMI
+   Hw interrupt; [30] PBF Parity error; [31] PBF Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_1_FUNC_0				 0xa22c
+#define MISC_REG_AEU_INVERTER_1_FUNC_1				 0xa23c
+/* [RW 32] second 32b for inverting the input for function 0; for each bit:
+   0= do not invert; 1= invert. mapped as follows: [0] PBClient Parity
+   error; [1] PBClient Hw interrupt; [2] QM Parity error; [3] QM Hw
+   interrupt; [4] Timers Parity error; [5] Timers Hw interrupt; [6] XSDM
+   Parity error; [7] XSDM Hw interrupt; [8] XCM Parity error; [9] XCM Hw
+   interrupt; [10] XSEMI Parity error; [11] XSEMI Hw interrupt; [12]
+   DoorbellQ Parity error; [13] DoorbellQ Hw interrupt; [14] NIG Parity
+   error; [15] NIG Hw interrupt; [16] Vaux PCI core Parity error; [17] Vaux
+   PCI core Hw interrupt; [18] Debug Parity error; [19] Debug Hw interrupt;
+   [20] USDM Parity error; [21] USDM Hw interrupt; [22] UCM Parity error;
+   [23] UCM Hw interrupt; [24] USEMI Parity error; [25] USEMI Hw interrupt;
+   [26] UPB Parity error; [27] UPB Hw interrupt; [28] CSDM Parity error;
+   [29] CSDM Hw interrupt; [30] CCM Parity error; [31] CCM Hw interrupt; */
+#define MISC_REG_AEU_INVERTER_2_FUNC_0				 0xa230
+#define MISC_REG_AEU_INVERTER_2_FUNC_1				 0xa240
+/* [RW 10] [7:0] = mask 8 attention output signals toward IGU function0;
+   [9:8] = reserved. Zero = mask; one = unmask */
+#define MISC_REG_AEU_MASK_ATTN_FUNC_0				 0xa060
+#define MISC_REG_AEU_MASK_ATTN_FUNC_1				 0xa064
+/* [RW 1] If set a system kill occurred */
+#define MISC_REG_AEU_SYS_KILL_OCCURRED				 0xa610
+/* [RW 32] Represents the status of the input vector to the AEU when a system
+   kill occurred. The register is reset in por reset. Mapped as follows: [0]
+   NIG attention for function0; [1] NIG attention for function1; [2] GPIO1
+   mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; [6] GPIO1 function 1;
+   [7] GPIO2 function 1; [8] GPIO3 function 1; [9] GPIO4 function 1; [10]
+   PCIE glue/PXP VPD event function0; [11] PCIE glue/PXP VPD event
+   function1; [12] PCIE glue/PXP Expansion ROM event0; [13] PCIE glue/PXP
+   Expansion ROM event1; [14] SPIO4; [15] SPIO5; [16] MSI/X indication for
+   mcp; [17] MSI/X indication for function 1; [18] BRB Parity error; [19]
+   BRB Hw interrupt; [20] PRS Parity error; [21] PRS Hw interrupt; [22] SRC
+   Parity error; [23] SRC Hw interrupt; [24] TSDM Parity error; [25] TSDM Hw
+   interrupt; [26] TCM Parity error; [27] TCM Hw interrupt; [28] TSEMI
+   Parity error; [29] TSEMI Hw interrupt; [30] PBF Parity error; [31] PBF Hw
+   interrupt; */
+#define MISC_REG_AEU_SYS_KILL_STATUS_0				 0xa600
+#define MISC_REG_AEU_SYS_KILL_STATUS_1				 0xa604
+#define MISC_REG_AEU_SYS_KILL_STATUS_2				 0xa608
+#define MISC_REG_AEU_SYS_KILL_STATUS_3				 0xa60c
+/* [R 4] This field indicates the type of the device. '0' - 2 Ports; '1' - 1
+   Port. */
+#define MISC_REG_BOND_ID					 0xa400
+/* [R 8] These bits indicate the metal revision of the chip. This value
+   starts at 0x00 for each all-layer tape-out and increments by one for each
+   tape-out. */
+#define MISC_REG_CHIP_METAL					 0xa404
+/* [R 16] These bits indicate the part number for the chip. */
+#define MISC_REG_CHIP_NUM					 0xa408
+/* [R 4] These bits indicate the base revision of the chip. This value
+   starts at 0x0 for the A0 tape-out and increments by one for each
+   all-layer tape-out. */
+#define MISC_REG_CHIP_REV					 0xa40c
+/* [RW 32] The following driver registers(1...16) represent 16 drivers and
+   32 clients. Each client can be controlled by one driver only. A one in a
+   bit means that this driver controls the corresponding client (e.g. bit 5
+   set means this driver controls client number 5). addr1 = set; addr0 =
+   clear; reads from both addresses give the same result = status. A write
+   to address 1 sets a request to control all the clients whose
+   corresponding bit (in the write command) is set. If the client is free
+   (the corresponding bit in all the other drivers is clear) a one will be
+   written to that driver register; if the client isn't free the bit will
+   remain zero. If the corresponding bit is already set (the driver
+   requests control of a client it already controls) the
+   ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. A write
+   to address 0 sets a request to free all the clients whose corresponding
+   bit (in the write command) is set. If the corresponding bit is clear
+   (the driver requests to free a client it doesn't control) the
+   ~MISC_REGISTERS_INT_STS.GENERIC_SW interrupt will be asserted. */
+#define MISC_REG_DRIVER_CONTROL_1				 0xa510
+#define MISC_REG_DRIVER_CONTROL_7				 0xa3c8
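+/* Usage sketch (illustrative, not part of this file): claiming and then
+ * releasing client 5 through the set/clear address pair described above.
+ * The +4 "set" address (addr 1) is an assumption based on the 32-bit word
+ * addressing in the comment; REG_WR()/REG_RD() are the driver's register
+ * accessors. Request control, check whether it was granted, then free:
+ *
+ *	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + 4, 1 << 5);
+ *	if (REG_RD(bp, MISC_REG_DRIVER_CONTROL_1) & (1 << 5))
+ *		client_5_owned = true;
+ *	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1, 1 << 5);
+ */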
+/* [RW 1] e1hmf for WOL. If clear; the WOL signal to the PXP will be sent
+   on bit 0 only. */
+#define MISC_REG_E1HMF_MODE					 0xa5f8
+/* [R 1] Status of four port mode path swap input pin. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP				 0xa75c
+/* [RW 2] 4 port path swap overwrite. [0] - Overwrite control; if it is 0 -
+   the path_swap output is equal to 4 port mode path swap input pin; if it
+   is 1 - the path_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PATH_SWAP_OVWR			 0xa738
+/* [R 1] Status of 4 port mode port swap input pin. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP				 0xa754
+/* [RW 2] 4 port port swap overwrite. [0] - Overwrite control; if it is 0 -
+   the port_swap output is equal to 4 port mode port swap input pin; if it
+   is 1 - the port_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the port_swap output. Reset on Hard reset. */
+#define MISC_REG_FOUR_PORT_PORT_SWAP_OVWR			 0xa734
+/* [RW 32] Debug only: spare RW register reset by core reset */
+#define MISC_REG_GENERIC_CR_0					 0xa460
+#define MISC_REG_GENERIC_CR_1					 0xa464
+/* [RW 32] Debug only: spare RW register reset by por reset */
+#define MISC_REG_GENERIC_POR_1					 0xa474
+/* [RW 32] Bit[0]: EPIO MODE SEL: Setting this bit to 1 will allow SW/FW to
+   use all of the 32 Extended GPIO pins. Without setting this bit; an EPIO
+   can not be configured as an output. Each output has its output enable in
+   the MCP register space; but this bit needs to be set to make use of that.
+   Bit[3:1] spare. Bit[4]: WCVTMON_PWRDN: Powerdown for Warpcore VTMON. When
+   set to 1 - Powerdown. Bit[5]: WCVTMON_RESETB: Reset for Warpcore VTMON.
+   When set to 0 - vTMON is in reset. Bit[6]: setting this bit will change
+   the i/o to an output and will drive the TimeSync output. Bit[31:7]:
+   spare. Global register. Reset by hard reset. */
+#define MISC_REG_GEN_PURP_HWG					 0xa9a0
+/* [RW 32] GPIO. [31-28] FLOAT port 1; [27-24] FLOAT port 0; When any of
+   these bits is written as a '1'; the corresponding GPIO bit will turn off
+   its drivers and become an input. This is the reset state of all GPIO
+   pins. The read value of these bits will be a '1' if that last command
+   (#SET; #CLR; or #FLOAT) for this bit was a #FLOAT. (reset value 0xff).
+   [23-20] CLR port 1; [19-16] CLR port 0; When any of these bits is written
+   as a '1'; the corresponding GPIO bit will drive low. The read value of
+   these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+   this bit was a #CLR. (reset value 0). [15-12] SET port 1; [11-8] SET
+   port 0; When any of these bits is written as a '1'; the corresponding
+   GPIO bit will drive high (if it has that capability). The read value of
+   these bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for
+   this bit was a #SET. (reset value 0). [7-4] VALUE port 1; [3-0] VALUE
+   port 0; RO; These bits indicate the read value of each of the eight GPIO
+   pins. This is the result value of the pin; not the drive value. Writing
+   these bits will have no effect. */
+#define MISC_REG_GPIO						 0xa490
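+/* Illustrative sketch (not part of this file): driving GPIO0 of port 0
+ * high and then returning it to its floating reset state, using the SET
+ * (bits [11-8] for port 0) and FLOAT (bits [27-24] for port 0) command
+ * fields described above; REG_WR() is the driver's register accessor:
+ *
+ *	REG_WR(bp, MISC_REG_GPIO, 1 << 8);
+ *	REG_WR(bp, MISC_REG_GPIO, 1 << 24);
+ */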
+/* [RW 8] These bits enable the GPIO_INTs to signal events to the IGU/MCP
+   according to the following map: [0] p0_gpio_0; [1] p0_gpio_1; [2]
+   p0_gpio_2; [3] p0_gpio_3; [4] p1_gpio_0; [5] p1_gpio_1; [6] p1_gpio_2;
+   [7] p1_gpio_3; */
+#define MISC_REG_GPIO_EVENT_EN					 0xa2bc
+/* [RW 32] GPIO INT. [31-28] OLD_CLR port1; [27-24] OLD_CLR port0; Writing a
+   '1' to these bits clears the corresponding bit in the #OLD_VALUE register.
+   This will acknowledge an interrupt on the falling edge of the
+   corresponding GPIO input (reset value 0). [23-20] OLD_SET port1; [19-16]
+   OLD_SET port0; Writing a '1' to these bits sets the corresponding bit in
+   the #OLD_VALUE register. This will acknowledge an interrupt on the rising
+   edge of the corresponding GPIO input (reset value 0). [15-12] OLD_VALUE
+   port1; [11-8] OLD_VALUE port0; RO; These bits indicate the old value of
+   the GPIO input. When the ~INT_STATE bit is set; this bit indicates the
+   OLD value of the pin such that if ~INT_STATE is set and this bit is '0';
+   then the interrupt is due to a low to high edge. If ~INT_STATE is set and
+   this bit is '1'; then the interrupt is due to a high to low edge (reset
+   value 0). [7-4] INT_STATE port1; [3-0] INT_STATE port0; RO; These bits
+   indicate the current GPIO interrupt state for each GPIO pin. This bit is
+   cleared when the appropriate #OLD_SET or #OLD_CLR command bit is written.
+   This bit is set when the GPIO input does not match the current value in
+   #OLD_VALUE (reset value 0). */
+#define MISC_REG_GPIO_INT					 0xa494
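+/* Illustrative sketch (not part of this file): acknowledging a rising-edge
+ * interrupt on GPIO0 of port 0 by writing its OLD_SET bit (assumed to be
+ * bit 16, i.e. [19-16] for port 0 in the map above), which also clears the
+ * corresponding INT_STATE bit:
+ *
+ *	REG_WR(bp, MISC_REG_GPIO_INT, 1 << 16);
+ */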
+/* [R 28] this field holds the last information that caused reserved
+   attention. bits [19:0] - address; [22:20] function; [23] reserved;
+   [27:24] the master that caused the attention - according to the following
+   encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+   dbu; 8 = dmae */
+#define MISC_REG_GRC_RSV_ATTN					 0xa3c0
+/* [R 28] this field holds the last information that caused timeout
+   attention. bits [19:0] - address; [22:20] function; [23] reserved;
+   [27:24] the master that caused the attention - according to the following
+   encoding: 1 = pxp; 2 = mcp; 3 = usdm; 4 = tsdm; 5 = xsdm; 6 = csdm; 7 =
+   dbu; 8 = dmae */
+#define MISC_REG_GRC_TIMEOUT_ATTN				 0xa3c4
+/* [RW 1] Setting this bit enables a timer in the GRC block to timeout any
+   access that does not finish within
+   ~misc_registers_grc_timout_val.grc_timeout_val cycles. When this bit is
+   cleared; this timeout is disabled. If this timeout occurs; the GRC shall
+   assert its attention output. */
+#define MISC_REG_GRC_TIMEOUT_EN 				 0xa280
+/* [RW 28] 28 LSB of LCPLL first register; reset val = 521. inside order of
+   the bits is: [2:0] OAC (reset value 001) CML output buffer bias control;
+   111 for +40%; 011 for +20%; 001 for 0%; 000 for -20%. [5:3] Icp_ctrl
+   (reset value 001) Charge pump current control; 111 for 720u; 011 for
+   600u; 001 for 480u and 000 for 360u. [7:6] Bias_ctrl (reset value 00)
+   Global bias control; When bit 7 is high bias current will be 10 0gh; When
+   bit 6 is high bias will be 100w; Valid values are 00; 10; 01. [10:8]
+   Pll_observe (reset value 010) Bits to control observability. bit 10 is
+   for test bias; bit 9 is for test CK; bit 8 is test Vc. [12:11] Vth_ctrl
+   (reset value 00) Comparator threshold control. 00 for 0.6V; 01 for 0.54V
+   and 10 for 0.66V. [13] pllSeqStart (reset value 0) Enables VCO tuning
+   sequencer: 1= sequencer disabled; 0= sequencer enabled (inverted
+   internally). [14] reserved (reset value 0) Reset for VCO sequencer is
+   connected to RESET input directly. [15] capRetry_en (reset value 0)
+   enable retry on cap search failure (inverted). [16] freqMonitor_e (reset
+   value 0) bit to continuously monitor vco freq (inverted). [17]
+   freqDetRestart_en (reset value 0) bit to enable restart when not freq
+   locked (inverted). [18] freqDetRetry_en (reset value 0) bit to enable
+   retry on freq det failure(inverted). [19] pllForceFdone_en (reset value
+   0) bit to enable pllForceFdone & pllForceFpass into pllSeq. [20]
+   pllForceFdone (reset value 0) bit to force freqDone. [21] pllForceFpass
+   (reset value 0) bit to force freqPass. [22] pllForceDone_en (reset value
+   0) bit to enable pllForceCapDone. [23] pllForceCapDone (reset value 0)
+   bit to force capDone. [24] pllForceCapPass_en (reset value 0) bit to
+   enable pllForceCapPass. [25] pllForceCapPass (reset value 0) bit to force
+   capPass. [26] capRestart (reset value 0) bit to force cap sequencer to
+   restart. [27] capSelectM_en (reset value 0) bit to enable cap select
+   register bits. */
+#define MISC_REG_LCPLL_CTRL_1					 0xa2a4
+#define MISC_REG_LCPLL_CTRL_REG_2				 0xa2a8
+/* [RW 4] Interrupt mask register #0 read/write */
+#define MISC_REG_MISC_INT_MASK					 0xa388
+/* [RW 1] Parity mask register #0 read/write */
+#define MISC_REG_MISC_PRTY_MASK 				 0xa398
+/* [R 1] Parity register #0 read */
+#define MISC_REG_MISC_PRTY_STS					 0xa38c
+/* [RC 1] Parity register #0 read clear */
+#define MISC_REG_MISC_PRTY_STS_CLR				 0xa390
+#define MISC_REG_NIG_WOL_P0					 0xa270
+#define MISC_REG_NIG_WOL_P1					 0xa274
+/* [R 1] If set; indicates that the pcie_rst_b was asserted without perst
+   assertion */
+#define MISC_REG_PCIE_HOT_RESET 				 0xa618
+/* [RW 32] 32 LSB of storm PLL first register; reset val = 0x071d2911.
+   inside order of the bits is: [0] P1 divider[0] (reset value 1); [1] P1
+   divider[1] (reset value 0); [2] P1 divider[2] (reset value 0); [3] P1
+   divider[3] (reset value 0); [4] P2 divider[0] (reset value 1); [5] P2
+   divider[1] (reset value 0); [6] P2 divider[2] (reset value 0); [7] P2
+   divider[3] (reset value 0); [8] ph_det_dis (reset value 1); [9]
+   freq_det_dis (reset value 0); [10] Icpx[0] (reset value 0); [11] Icpx[1]
+   (reset value 1); [12] Icpx[2] (reset value 0); [13] Icpx[3] (reset value
+   1); [14] Icpx[4] (reset value 0); [15] Icpx[5] (reset value 0); [16]
+   Rx[0] (reset value 1); [17] Rx[1] (reset value 0); [18] vc_en (reset
+   value 1); [19] vco_rng[0] (reset value 1); [20] vco_rng[1] (reset value
+   1); [21] Kvco_xf[0] (reset value 0); [22] Kvco_xf[1] (reset value 0);
+   [23] Kvco_xf[2] (reset value 0); [24] Kvco_xs[0] (reset value 1); [25]
+   Kvco_xs[1] (reset value 1); [26] Kvco_xs[2] (reset value 1); [27]
+   testd_en (reset value 0); [28] testd_sel[0] (reset value 0); [29]
+   testd_sel[1] (reset value 0); [30] testd_sel[2] (reset value 0); [31]
+   testa_en (reset value 0); */
+#define MISC_REG_PLL_STORM_CTRL_1				 0xa294
+#define MISC_REG_PLL_STORM_CTRL_2				 0xa298
+#define MISC_REG_PLL_STORM_CTRL_3				 0xa29c
+#define MISC_REG_PLL_STORM_CTRL_4				 0xa2a0
+/* [R 1] Status of 4 port mode enable input pin. */
+#define MISC_REG_PORT4MODE_EN					 0xa750
+/* [RW 2] 4 port mode enable overwrite. [0] - Overwrite control; if it is 0 -
+ * the port4mode_en output is equal to 4 port mode input pin; if it is 1 -
+ * the port4mode_en output is equal to bit[1] of this register; [1] -
+ * Overwrite value. If bit[0] of this register is 1 this is the value that
+ * receives the port4mode_en output. */
+#define MISC_REG_PORT4MODE_EN_OVWR				 0xa720
+/* [RW 32] reset reg#2; write/read one = the specific block is out of reset;
+   write/read zero = the specific block is in reset; addr 0-wr- the write
+   value will be written to the register; addr 1-set - one will be written
+   to all the bits that have the value of one in the data written (bits that
+   have the value of zero will not be changed); addr 2-clear - zero will be
+   written to all the bits that have the value of one in the data written
+   (bits that have the value of zero will not be changed); addr 3-ignore;
+   read ignore from all addr except addr 00; inside order of the bits is:
+   [0] rst_bmac0; [1] rst_bmac1; [2] rst_emac0; [3] rst_emac1; [4] rst_grc;
+   [5] rst_mcp_n_reset_reg_hard_core; [6] rst_mcp_n_hard_core_rst_b; [7]
+   rst_mcp_n_reset_cmn_cpu; [8] rst_mcp_n_reset_cmn_core; [9] rst_rbcn;
+   [10] rst_dbg; [11] rst_misc_core; [12] rst_dbue (UART); [13]
+   Pci_resetmdio_n; [14] rst_emac0_hard_core; [15] rst_emac1_hard_core;
+   [16] rst_pxp_rq_rd_wr; [31:17] reserved */
+#define MISC_REG_RESET_REG_2					 0xa590
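+/* Illustrative sketch (not part of this file): taking EMAC0 (bit 2) out of
+ * reset through the "set" address and putting it back through the "clear"
+ * address. The +4/+8 offsets for addr 1/addr 2 are assumptions based on
+ * the word-indexed addressing described above:
+ *
+ *	REG_WR(bp, MISC_REG_RESET_REG_2 + 4, 1 << 2);
+ *	REG_WR(bp, MISC_REG_RESET_REG_2 + 8, 1 << 2);
+ */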
+/* [RW 20] 20 bit GRC address where the scratch-pad of the MCP that is
+   shared with the driver resides */
+#define MISC_REG_SHARED_MEM_ADDR				 0xa2b4
+/* [RW 32] SPIO. [31-24] FLOAT When any of these bits is written as a '1';
+   the corresponding SPIO bit will turn off its drivers and become an
+   input. This is the reset state of all SPIO pins. The read value of these
+   bits will be a '1' if that last command (#SET; #CLR; or #FLOAT) for this
+   bit was a #FLOAT. (reset value 0xff). [23-16] CLR When any of these bits
+   is written as a '1'; the corresponding SPIO bit will drive low. The read
+   value of these bits will be a '1' if that last command (#SET; #CLR; or
+   #FLOAT) for this bit was a #CLR. (reset value 0). [15-8] SET When any of
+   these bits is written as a '1'; the corresponding SPIO bit will drive
+   high (if it has that capability). The read value of these bits will be a
+   '1' if that last command (#SET; #CLR; or #FLOAT) for this bit was a #SET.
+   (reset value 0). [7-0] VALUE RO; These bits indicate the read value of
+   each of the eight SPIO pins. This is the result value of the pin; not the
+   drive value. Writing these bits will have no effect. Each 8-bit field is
+   divided as follows: [0] VAUX Enable; when pulsed low; enables supply
+   from VAUX. (This is an output pin only; the FLOAT field is not applicable
+   for this pin); [1] VAUX Disable; when pulsed low; disables supply from
+   VAUX. (This is an output pin only; FLOAT field is not applicable for this
+   pin); [2] SEL_VAUX_B - Control to power switching logic. Drive low to
+   select VAUX supply. (This is an output pin only; it is not controlled by
+   the SET and CLR fields; it is controlled by the Main Power SM; the FLOAT
+   field is not applicable for this pin; only the VALUE field is relevant -
+   it reflects the output value); [3] port swap; [4] spio_4; [5] spio_5; [6]
+   Bit 0 of UMP device ID select; read by UMP firmware; [7] Bit 1 of UMP
+   device ID select; read by UMP firmware. */
+#define MISC_REG_SPIO						 0xa4fc
+/* [RW 8] These bits enable the SPIO_INTs to signal events to the IGU/MCP
+   according to the following map: [3:0] reserved; [4] spio_4; [5] spio_5;
+   [7:6] reserved */
+#define MISC_REG_SPIO_EVENT_EN					 0xa2b8
+/* [RW 32] SPIO INT. [31-24] OLD_CLR Writing a '1' to these bits clears the
+   corresponding bit in the #OLD_VALUE register. This will acknowledge an
+   interrupt on the falling edge of the corresponding SPIO input (reset
+   value 0). [23-16] OLD_SET Writing a '1' to these bits sets the
+   corresponding bit in the #OLD_VALUE register. This will acknowledge an
+   interrupt on the rising edge of the corresponding SPIO input (reset value
+   0). [15-8] OLD_VALUE RO; These bits indicate the old value of the SPIO
+   input. When the
+   ~INT_STATE bit is set; this bit indicates the OLD value of the pin such
+   that if ~INT_STATE is set and this bit is '0'; then the interrupt is due
+   to a low to high edge. If ~INT_STATE is set and this bit is '1'; then the
+   interrupt is due to a high to low edge (reset value 0). [7-0] INT_STATE
+   RO; These bits indicate the current SPIO interrupt state for each SPIO
+   pin. This bit is cleared when the appropriate #OLD_SET or #OLD_CLR
+   command bit is written. This bit is set when the SPIO input does not
+   match the current value in #OLD_VALUE (reset value 0). */
+#define MISC_REG_SPIO_INT					 0xa500
+/* [RW 32] reload value for counter 4; the value will be reloaded if
+   the counter reached zero and the reload bit
+   (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
+#define MISC_REG_SW_TIMER_RELOAD_VAL_4				 0xa2fc
+/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
+   in this register. address 0 - timer 1; address 1 - timer 2, ...  address 7 -
+   timer 8 */
+#define MISC_REG_SW_TIMER_VAL					 0xa5c0
+/* [R 1] Status of two port mode path swap input pin. */
+#define MISC_REG_TWO_PORT_PATH_SWAP				 0xa758
+/* [RW 2] 2 port swap overwrite. [0] - Overwrite control; if it is 0 - the
+   path_swap output is equal to 2 port mode path swap input pin; if it is 1
+   - the path_swap output is equal to bit[1] of this register; [1] -
+   Overwrite value. If bit[0] of this register is 1 this is the value that
+   receives the path_swap output. Reset on Hard reset. */
+#define MISC_REG_TWO_PORT_PATH_SWAP_OVWR			 0xa72c
+/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
+   loaded; 0 - prepare; 1 - unprepare */
+#define MISC_REG_UNPREPARED					 0xa424
+/* [RW 5] MDIO PHY Address. The WC uses this address to determine whether or
+ * not it is the recipient of the message on the MDIO interface. The value
+ * is compared to the value on ctrl_md_devad. Drives output
+ * misc_xgxs0_phy_addr. Global register. */
+#define MISC_REG_WC0_CTRL_PHY_ADDR				 0xa9cc
+/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
+   side. This should be less than or equal to phy_port_mode; if some of the
+   ports are not used. This enables reduction of frequency on the core side.
+   00 - Single Port Mode; 01 - Dual Port Mode; 10 - Tri Port Mode; 11 -
+   Quad Port Mode. This is a strap input for the XMAC_MP core; and should
+   be changed only while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_CORE_PORT_MODE				 0xa964
+/* [RW 2] XMAC PHY port mode. Indicates the number of ports on the Warp
+   Core. 00 - Single Port Mode; 01 - Dual Port Mode; 1x - Quad Port Mode.
+   This is a strap input for the XMAC_MP core; and should be changed only
+   while reset is held low. Reset on Hard reset. */
+#define MISC_REG_XMAC_PHY_PORT_MODE				 0xa960
+/* [RW 32] 1 [47] Packet Size = 64. Writes to this register write bits 31:0.
+ * Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_RX_STAT_GR64_LO				 0x200
+/* [RW 32] 1 [00] Tx Good Packet Count. Writes to this register write bits
+ * 31:0. Reads from this register will clear bits 31:0. */
+#define MSTAT_REG_TX_STAT_GTXPOK_LO				 0
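+/* Illustrative sketch (not part of this file): the MSTAT counters above
+ * are clear-on-read, so a statistics pass reads each LO register once and
+ * accumulates into a software total instead of re-reading the register.
+ * mstat_base stands for the per-port MSTAT block base address, which is
+ * not defined in this excerpt:
+ *
+ *	tx_ok += REG_RD(bp, mstat_base + MSTAT_REG_TX_STAT_GTXPOK_LO);
+ */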
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST	 (0x1<<0)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST	 (0x1<<1)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN	 (0x1<<4)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST	 (0x1<<2)
+#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN	 (0x1<<3)
+#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN			 (0x1<<0)
+#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN			 (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT	 (0x1<<0)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS	 (0x1<<9)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G 	 (0x1<<15)
+#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK_STATUS	 (0xf<<18)
+/* [RW 1] Input enable for RX_BMAC0 IF */
+#define NIG_REG_BMAC0_IN_EN					 0x100ac
+/* [RW 1] output enable for TX_BMAC0 IF */
+#define NIG_REG_BMAC0_OUT_EN					 0x100e0
+/* [RW 1] output enable for TX BMAC pause port 0 IF */
+#define NIG_REG_BMAC0_PAUSE_OUT_EN				 0x10110
+/* [RW 1] output enable for RX_BMAC0_REGS IF */
+#define NIG_REG_BMAC0_REGS_OUT_EN				 0x100e8
+/* [RW 1] output enable for RX BRB1 port0 IF */
+#define NIG_REG_BRB0_OUT_EN					 0x100f8
+/* [RW 1] Input enable for TX BRB1 pause port 0 IF */
+#define NIG_REG_BRB0_PAUSE_IN_EN				 0x100c4
+/* [RW 1] output enable for RX BRB1 port1 IF */
+#define NIG_REG_BRB1_OUT_EN					 0x100fc
+/* [RW 1] Input enable for TX BRB1 pause port 1 IF */
+#define NIG_REG_BRB1_PAUSE_IN_EN				 0x100c8
+/* [RW 1] output enable for RX BRB1 LP IF */
+#define NIG_REG_BRB_LB_OUT_EN					 0x10100
+/* [WB_W 82] Debug packet to LP from RBC; Data spelling: [63:0] data; [64]
+   error; [67:65] eop_bvalid; [68] eop; [69] sop; [70] port_id; [71] flush;
+   [73:72] vnic_num; [81:74] sideband_info */
+#define NIG_REG_DEBUG_PACKET_LB 				 0x10800
+/* [RW 1] Input enable for TX Debug packet */
+#define NIG_REG_EGRESS_DEBUG_IN_EN				 0x100dc
+/* [RW 1] If 1 - egress drain mode for port0 is active. In this mode all
+   packets from the PBF are not forwarded to the MAC and are just deleted
+   from the FIFO. The first packet may be deleted from the middle; the last
+   packet will always be deleted to the end. */
+#define NIG_REG_EGRESS_DRAIN0_MODE				 0x10060
+/* [RW 1] Output enable to EMAC0 */
+#define NIG_REG_EGRESS_EMAC0_OUT_EN				 0x10120
+/* [RW 1] MAC configuration for packets of port0. If 1 - all packets output
+   to the EMAC for port0; otherwise to the BMAC for port0 */
+#define NIG_REG_EGRESS_EMAC0_PORT				 0x10058
+/* [RW 1] Input enable for TX PBF user packet port0 IF */
+#define NIG_REG_EGRESS_PBF0_IN_EN				 0x100cc
+/* [RW 1] Input enable for TX PBF user packet port1 IF */
+#define NIG_REG_EGRESS_PBF1_IN_EN				 0x100d0
+/* [RW 1] Input enable for TX UMP management packet port0 IF */
+#define NIG_REG_EGRESS_UMP0_IN_EN				 0x100d4
+/* [RW 1] Input enable for RX_EMAC0 IF */
+#define NIG_REG_EMAC0_IN_EN					 0x100a4
+/* [RW 1] output enable for TX EMAC pause port 0 IF */
+#define NIG_REG_EMAC0_PAUSE_OUT_EN				 0x10118
+/* [R 1] status from emac0. This bit is set when MDINT from either the
+   EXT_MDINT pin or from the Copper PHY is driven low. This condition must
+   be cleared in the attached PHY device that is driving the MDINT pin. */
+#define NIG_REG_EMAC0_STATUS_MISC_MI_INT			 0x10494
+/* [WB 48] This address space contains BMAC0 registers. The BMAC registers
+   are described in appendix A. In order to access the BMAC0 registers; the
+   base address; NIG_REGISTERS_INGRESS_BMAC0_MEM; Offset: 0x10c00; should be
+   added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC0_MEM				 0x10c00
+/* [WB 48] This address space contains BMAC1 registers. The BMAC registers
+   are described in appendix A. In order to access the BMAC1 registers; the
+   base address; NIG_REGISTERS_INGRESS_BMAC1_MEM; Offset: 0x11000; should be
+   added to each BMAC register offset */
+#define NIG_REG_INGRESS_BMAC1_MEM				 0x11000
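+/* Illustrative sketch (not part of this file): a BMAC register is reached
+ * by adding its appendix-A offset to the per-port base above; drivers
+ * typically access these wide-bus ([WB]) registers through DMAE rather
+ * than a plain read, so this only illustrates the address arithmetic.
+ * BIGMAC_REGISTER_TX_CONTROL stands for one such register offset:
+ *
+ *	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
+ *			       NIG_REG_INGRESS_BMAC0_MEM;
+ *	u32 ctrl = bmac_addr + BIGMAC_REGISTER_TX_CONTROL;
+ */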
+/* [R 1] FIFO empty in EOP descriptor FIFO of LP in NIG_RX_EOP */
+#define NIG_REG_INGRESS_EOP_LB_EMPTY				 0x104e0
+/* [RW 17] Debug only. RX_EOP_DSCR_lb_FIFO in NIG_RX_EOP. Data
+   packet_length[13:0]; mac_error[14]; trunc_error[15]; parity[16] */
+#define NIG_REG_INGRESS_EOP_LB_FIFO				 0x104e4
+/* [RW 27] 0 - must be active for Everest A0; 1 - for Everest B0 when latch
+   logic for interrupts must be used. Enable per bit of interrupt of
+   ~latch_status.latch_status */
+#define NIG_REG_LATCH_BC_0					 0x16210
+/* [RW 27] Latch for each interrupt from Unicore.b[0]
+   status_emac0_misc_mi_int; b[1] status_emac0_misc_mi_complete;
+   b[2]status_emac0_misc_cfg_change; b[3]status_emac0_misc_link_status;
+   b[4]status_emac0_misc_link_change; b[5]status_emac0_misc_attn;
+   b[6]status_serdes0_mac_crs; b[7]status_serdes0_autoneg_complete;
+   b[8]status_serdes0_fiber_rxact; b[9]status_serdes0_link_status;
+   b[10]status_serdes0_mr_page_rx; b[11]status_serdes0_cl73_an_complete;
+   b[12]status_serdes0_cl73_mr_page_rx; b[13]status_serdes0_rx_sigdet;
+   b[14]status_xgxs0_remotemdioreq; b[15]status_xgxs0_link10g;
+   b[16]status_xgxs0_autoneg_complete; b[17]status_xgxs0_fiber_rxact;
+   b[21:18]status_xgxs0_link_status; b[22]status_xgxs0_mr_page_rx;
+   b[23]status_xgxs0_cl73_an_complete; b[24]status_xgxs0_cl73_mr_page_rx;
+   b[25]status_xgxs0_rx_sigdet; b[26]status_xgxs0_mac_crs */
+#define NIG_REG_LATCH_STATUS_0					 0x18000
+/* [RW 1] led 10g for port 0 */
+#define NIG_REG_LED_10G_P0					 0x10320
+/* [RW 1] led 10g for port 1 */
+#define NIG_REG_LED_10G_P1					 0x10324
+/* [RW 1] Port0: This bit is set to enable the use of the
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 field
+   defined below. If this bit is cleared; then the blink rate will be about
+   8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0			 0x10318
+/* [RW 12] Port0: Specifies the period of each blink cycle (on + off) for
+   Traffic LED in milliseconds. Must be a non-zero value. This 12-bit field
+   is reset to 0x080; giving a default blink rate of approximately 8Hz. */
+#define NIG_REG_LED_CONTROL_BLINK_RATE_P0			 0x10310
+/* [RW 1] Port0: If set along with the
+ ~nig_registers_led_control_override_traffic_p0.led_control_override_traffic_p0
+   bit and ~nig_registers_led_control_traffic_p0.led_control_traffic_p0 LED
+   bit; the Traffic LED will blink with the blink rate specified in
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+   ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+   fields. */
+#define NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0			 0x10308
+/* [RW 1] Port0: If set overrides hardware control of the Traffic LED. The
+   Traffic LED will then be controlled via bit ~nig_registers_
+   led_control_traffic_p0.led_control_traffic_p0 and bit
+   ~nig_registers_led_control_blink_traffic_p0.led_control_blink_traffic_p0 */
+#define NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 		 0x102f8
+/* [RW 1] Port0: If set along with the led_control_override_traffic_p0 bit;
+   turns on the Traffic LED. If the led_control_blink_traffic_p0 bit is also
+   set; the LED will blink with blink rate specified in
+   ~nig_registers_led_control_blink_rate_p0.led_control_blink_rate_p0 and
+   ~nig_registers_led_control_blink_rate_ena_p0.led_control_blink_rate_ena_p0
+   fields. */
+#define NIG_REG_LED_CONTROL_TRAFFIC_P0				 0x10300
+/* [RW 4] led mode for port0: 0 - MAC; 1-3 - PHY1; 4 - MAC2; 5-7 - PHY4;
+   8 - MAC3; 9-11 - PHY7; 12 - MAC4; 13-15 - PHY10; */
+#define NIG_REG_LED_MODE_P0					 0x102f0
+/* [RW 3] for port0 enable for llfc ppp and pause. b0 - brb1 enable; b1 -
+   tsdm enable; b2 - usdm enable */
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_0			 0x16070
+#define NIG_REG_LLFC_EGRESS_SRC_ENABLE_1			 0x16074
+/* [RW 1] SAFC enable for port0. This register may be set to 1 only when
+   ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable = 0 for the
+   same port */
+#define NIG_REG_LLFC_ENABLE_0					 0x16208
+#define NIG_REG_LLFC_ENABLE_1					 0x1620c
+/* [RW 16] classes are high-priority for port0 */
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0			 0x16058
+#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1			 0x1605c
+/* [RW 16] classes are low-priority for port0 */
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0			 0x16060
+#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1			 0x16064
+/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
+#define NIG_REG_LLFC_OUT_EN_0					 0x160c8
+#define NIG_REG_LLFC_OUT_EN_1					 0x160cc
+#define NIG_REG_LLH0_ACPI_PAT_0_CRC				 0x1015c
+#define NIG_REG_LLH0_ACPI_PAT_6_LEN				 0x10154
+#define NIG_REG_LLH0_BRB1_DRV_MASK				 0x10244
+#define NIG_REG_LLH0_BRB1_DRV_MASK_MF				 0x16048
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH0_BRB1_NOT_MCP				 0x1025c
+/* [RW 2] Determine the classification participants. 0: no classification;
+   1: classification upon VLAN id; 2: classification upon MAC address; 3:
+   classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH0_CLS_TYPE					 0x16080
+/* [RW 32] cm header for llh0 */
+#define NIG_REG_LLH0_CM_HEADER					 0x1007c
+#define NIG_REG_LLH0_DEST_IP_0_1				 0x101dc
+#define NIG_REG_LLH0_DEST_MAC_0_0				 0x101c0
+/* [RW 16] destination TCP address 1. The LLH will look for this address in
+   all incoming packets. */
+#define NIG_REG_LLH0_DEST_TCP_0 				 0x10220
+/* [RW 16] destination UDP address 1 The LLH will look for this address in
+   all incoming packets. */
+#define NIG_REG_LLH0_DEST_UDP_0 				 0x10214
+#define NIG_REG_LLH0_ERROR_MASK 				 0x1008c
+/* [RW 8] event id for llh0 */
+#define NIG_REG_LLH0_EVENT_ID					 0x10084
+#define NIG_REG_LLH0_FUNC_EN					 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM					 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE				 0x16140
+#define NIG_REG_LLH0_FUNC_VLAN_ID				 0x16100
+/* [RW 1] Determine the IP version to look for in
+   ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1 - IPv4 */
+#define NIG_REG_LLH0_IPV4_IPV6_0				 0x10208
+/* [RW 1] t bit for llh0 */
+#define NIG_REG_LLH0_T_BIT					 0x10074
+/* [RW 12] VLAN ID 1. In case of VLAN packet the LLH will look for this ID. */
+#define NIG_REG_LLH0_VLAN_ID_0					 0x1022c
+/* [RW 8] init credit counter for port0 in LLH */
+#define NIG_REG_LLH0_XCM_INIT_CREDIT				 0x10554
+#define NIG_REG_LLH0_XCM_MASK					 0x10130
+#define NIG_REG_LLH1_BRB1_DRV_MASK				 0x10248
+/* [RW 1] send to BRB1 if no match on any of RMP rules. */
+#define NIG_REG_LLH1_BRB1_NOT_MCP				 0x102dc
+/* [RW 2] Determine the classification participants. 0: no classification;
+   1: classification upon VLAN id; 2: classification upon MAC address; 3:
+   classification upon both VLAN id & MAC addr. */
+#define NIG_REG_LLH1_CLS_TYPE					 0x16084
+/* [RW 32] cm header for llh1 */
+#define NIG_REG_LLH1_CM_HEADER					 0x10080
+#define NIG_REG_LLH1_ERROR_MASK 				 0x10090
+/* [RW 8] event id for llh1 */
+#define NIG_REG_LLH1_EVENT_ID					 0x10088
+#define NIG_REG_LLH1_FUNC_MEM					 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE				 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE				 16
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+ * sending it to the BRB or calculating WoL on it. This bit controls port 1
+ * only. The legacy llh_multi_function_mode bit controls port 0. */
+#define NIG_REG_LLH1_MF_MODE					 0x18614
+/* [RW 8] init credit counter for port1 in LLH */
+#define NIG_REG_LLH1_XCM_INIT_CREDIT				 0x10564
+#define NIG_REG_LLH1_XCM_MASK					 0x10134
+/* [RW 1] When this bit is set; the LLH will expect all packets to be with
+   e1hov */
+#define NIG_REG_LLH_E1HOV_MODE					 0x160d8
+/* [RW 1] When this bit is set; the LLH will classify the packet before
+   sending it to the BRB or calculating WoL on it. */
+#define NIG_REG_LLH_MF_MODE					 0x16024
+#define NIG_REG_MASK_INTERRUPT_PORT0				 0x10330
+#define NIG_REG_MASK_INTERRUPT_PORT1				 0x10334
+/* [RW 1] Output signal from NIG to EMAC0. When set enables the EMAC0 block. */
+#define NIG_REG_NIG_EMAC0_EN					 0x1003c
+/* [RW 1] Output signal from NIG to EMAC1. When set enables the EMAC1 block. */
+#define NIG_REG_NIG_EMAC1_EN					 0x10040
+/* [RW 1] Output signal from NIG to TX_EMAC0. When set indicates to the
+   EMAC0 to strip the CRC from the ingress packets. */
+#define NIG_REG_NIG_INGRESS_EMAC0_NO_CRC			 0x10044
+/* [R 32] Interrupt register #0 read */
+#define NIG_REG_NIG_INT_STS_0					 0x103b0
+#define NIG_REG_NIG_INT_STS_1					 0x103c0
+/* [R 32] Legacy E1 and E1H location for parity error mask register. */
+#define NIG_REG_NIG_PRTY_MASK					 0x103dc
+/* [RW 32] Parity mask register #0 read/write */
+#define NIG_REG_NIG_PRTY_MASK_0					 0x183c8
+#define NIG_REG_NIG_PRTY_MASK_1					 0x183d8
+/* [R 32] Legacy E1 and E1H location for parity error status register. */
+#define NIG_REG_NIG_PRTY_STS					 0x103d0
+/* [R 32] Parity register #0 read */
+#define NIG_REG_NIG_PRTY_STS_0					 0x183bc
+#define NIG_REG_NIG_PRTY_STS_1					 0x183cc
+/* [R 32] Legacy E1 and E1H location for parity error status clear register. */
+#define NIG_REG_NIG_PRTY_STS_CLR				 0x103d4
+/* [RC 32] Parity register #0 read clear */
+#define NIG_REG_NIG_PRTY_STS_CLR_0				 0x183c0
+#define NIG_REG_NIG_PRTY_STS_CLR_1				 0x183d0
+#define MCPR_IMC_COMMAND_ENABLE					 (1L<<31)
+#define MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT			 16
+#define MCPR_IMC_COMMAND_OPERATION_BITSHIFT			 28
+#define MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT		 8
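+/* Illustrative sketch, not part of the register map: composing an IMC
+ * command word from the bit positions defined above. The operation and
+ * transfer-address values a caller would pass are hypothetical; only the
+ * shifts and the enable bit come from these definitions. */
+static inline u32 mcpr_imc_command_compose(u32 op, u32 xfer_addr)
+{
+        return MCPR_IMC_COMMAND_ENABLE |
+               (op << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) |
+               (xfer_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT);
+}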
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P0_HDRS_AFTER_BASIC				 0x18038
+/* [RW 1] HW PFC enable bit. Set this bit to enable the PFC functionality in
+ * the NIG. Other flow control modes such as PAUSE and SAFC/LLFC should be
+ * disabled when this bit is set. */
+#define NIG_REG_P0_HWPFC_ENABLE				 0x18078
+#define NIG_REG_P0_LLH_FUNC_MEM2				 0x18480
+#define NIG_REG_P0_LLH_FUNC_MEM2_ENABLE			 0x18440
+/* [RW 1] Input enable for RX MAC interface. */
+#define NIG_REG_P0_MAC_IN_EN					 0x185ac
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P0_MAC_OUT_EN					 0x185b0
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P0_MAC_PAUSE_OUT_EN				 0x185b4
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P0_PKT_PRIORITY_TO_COS				 0x18054
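+/* Illustrative sketch, not part of the register map: packing the eight
+ * 4-bit priority-to-COS fields described above into one 32-bit value.
+ * The cos[] array is a hypothetical per-priority COS table; REG_WR is
+ * assumed to be the driver's usual register write accessor. */
+static inline void nig_p0_set_prio_to_cos(struct bnx2x *bp, const u8 cos[8])
+{
+        u32 val = 0;
+        int prio;
+
+        for (prio = 0; prio < 8; prio++)
+                val |= (u32)(cos[prio] & 0xf) << (prio * 4);
+        REG_WR(bp, NIG_REG_P0_PKT_PRIORITY_TO_COS, val);
+}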
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS0_PRIORITY_MASK			 0x18058
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS1_PRIORITY_MASK			 0x1805c
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS2_PRIORITY_MASK			 0x186b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 3. A
+ * priority is mapped to COS 3 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS3_PRIORITY_MASK			 0x186b4
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 4. A
+ * priority is mapped to COS 4 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS4_PRIORITY_MASK			 0x186b8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 5. A
+ * priority is mapped to COS 5 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P0_RX_COS5_PRIORITY_MASK			 0x186bc
+/* [RW 15] Specify which of the credit registers the client is to be mapped
+ * to. Bits[2:0] are for client 0; bits [14:12] are for client 4. For
+ * clients that are not subject to WFQ credit blocking - their
+ * specifications here are not used. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP			 0x180f0
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB		 0x18688
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. */
+#define NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB		 0x1868c
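+/* Illustrative sketch, not part of the register map: programming the
+ * 36-bit client-to-credit-register map through the LSB/MSB pair above.
+ * The 64-bit 'map' argument (one 4-bit field per client, as described in
+ * the comments) is hypothetical; REG_WR is assumed to be the driver's
+ * register write accessor. */
+static inline void nig_p0_set_credit_map2(struct bnx2x *bp, u64 map)
+{
+        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_LSB,
+               (u32)(map & 0xffffffff));
+        REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP2_MSB,
+               (u32)(map >> 32) & 0xf);
+}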
+/* [RW 5] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client). Default value is set to enable
+ * strict priorities for clients 0-2 -- management and debug traffic. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT			 0x180e8
+/* [RW 5] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client). Default value is 0 for not using WFQ credit
+ * blocking. */
+#define NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ		 0x180ec
+/* [RW 32] Specify the upper bound that credit register 0 is allowed to
+ * reach. */
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0			 0x1810c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1			 0x18110
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_2			 0x18114
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_3			 0x18118
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_4			 0x1811c
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5			 0x186a0
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6			 0x186a4
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7			 0x186a8
+#define NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_8			 0x186ac
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0			 0x180f8
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1			 0x180fc
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_2			 0x18100
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_3			 0x18104
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4			 0x18108
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5			 0x18690
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6			 0x18694
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7			 0x18698
+#define NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8			 0x1869c
+/* [RW 12] Specify the number of strict priority arbitration slots between
+ * two round-robin arbitration slots to avoid starvation. A value of 0 means
+ * no strict priority cycles - the strict priority with anti-starvation
+ * arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS			 0x180f4
+/* [RW 15] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. Priority 0 is the highest priority. Bits [2:0]
+ * are for priority 0 client; bits [14:12] are for priority 4 client. The
+ * clients are assigned the following IDs: 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic. The reset value[14:0] is set to 0x4688 (15'b100_011_010_001_000)
+ * for management at priority 0; debug traffic at priorities 1 and 2; COS0
+ * traffic at priority 3; and COS1 traffic at priority 4. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT			 0x180e4
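+/* Illustrative sketch, not part of the register map: rebuilding the
+ * documented reset value 0x4688 from the 3-bit fields above - client 0
+ * (management) at priority 0 through client 4 (COS1) at priority 4. A
+ * sanity check of the layout, not driver code. */
+static inline u32 nig_p0_tx_arb_prio_client_default(void)
+{
+        return (0 << 0) | (1 << 3) | (2 << 6) | (3 << 9) | (4 << 12);
+}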
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define NIG_REG_P1_HDRS_AFTER_BASIC				 0x1818c
+#define NIG_REG_P1_LLH_FUNC_MEM2				 0x184c0
+#define NIG_REG_P1_LLH_FUNC_MEM2_ENABLE			 0x18460
+/* [RW 32] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35:32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_LSB			 0x18680
+/* [RW 4] Specify the client number to be assigned to each priority of the
+ * strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+ * value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+ * client; bits [35:32] are for priority 8 client. The clients are assigned
+ * the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+ * traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+ * 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+ * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+ * accommodate the 9 input clients to ETS arbiter. */
+#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB			 0x18684
+#define NIG_REG_P1_MAC_IN_EN					 0x185c0
+/* [RW 1] Output enable for TX MAC interface */
+#define NIG_REG_P1_MAC_OUT_EN					 0x185c4
+/* [RW 1] Output enable for TX PAUSE signal to the MAC. */
+#define NIG_REG_P1_MAC_PAUSE_OUT_EN				 0x185c8
+/* [RW 32] Eight 4-bit configurations for specifying which COS (0-15 for
+ * future expansion) each priority is to be mapped to. Bits 3:0 specify the
+ * COS for priority 0. Bits 31:28 specify the COS for priority 7. The 3-bit
+ * priority field is extracted from the outer-most VLAN in receive packet.
+ * Only COS 0 and COS 1 are supported in E2. */
+#define NIG_REG_P1_PKT_PRIORITY_TO_COS				 0x181a8
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 0. A
+ * priority is mapped to COS 0 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS0_PRIORITY_MASK			 0x181ac
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 1. A
+ * priority is mapped to COS 1 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS1_PRIORITY_MASK			 0x181b0
+/* [RW 16] Bit-map indicating which SAFC/PFC priorities to map to COS 2. A
+ * priority is mapped to COS 2 when the corresponding mask bit is 1. More
+ * than one bit may be set; allowing multiple priorities to be mapped to one
+ * COS. */
+#define NIG_REG_P1_RX_COS2_PRIORITY_MASK			 0x186f8
+/* [R 1] RX FIFO for receiving data from MAC is empty. */
+#define NIG_REG_P1_RX_MACFIFO_EMPTY				 0x1858c
+/* [R 1] TLLH FIFO is empty. */
+#define NIG_REG_P1_TLLH_FIFO_EMPTY				 0x18338
+/* [RW 32] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 31:0 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_LSB		 0x186e8
+/* [RW 4] Specify which of the credit registers the client is to be mapped
+ * to. This register specifies bits 35:32 of the 36-bit value. Bits[3:0] are
+ * for client 0; bits [35:32] are for client 8. For clients that are not
+ * subject to WFQ credit blocking - their specifications here are not used.
+ * This is a new register (with 2_) added in E3 B0 to accommodate the 9
+ * input clients to ETS arbiter. The reset default is set for management and
+ * debug to use credit registers 6, 7, and 8, respectively, and COSes 0-5 to
+ * use credit registers 0-5 respectively (0x543210876). Note that credit
+ * registers can not be shared between clients. Note also that there are
+ * only COS0-2 in port 1 - there is a total of 6 clients in port 1. Only
+ * credit registers 0-5 are valid. This register should be configured
+ * appropriately before enabling WFQ. */
+#define NIG_REG_P1_TX_ARB_CLIENT_CREDIT_MAP2_MSB		 0x186ec
+/* [RW 9] Specify whether the client competes directly in the strict
+ * priority arbiter. The bits are mapped according to client ID (client IDs
+ * are defined in tx_arb_priority_client2): 0-management; 1-debug traffic
+ * from this port; 2-debug traffic from other port; 3-COS0 traffic; 4-COS1
+ * traffic; 5-COS2 traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic.
+ * Default value is set to enable strict priorities for all clients. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_STRICT			 0x18234
+/* [RW 9] Specify whether the client is subject to WFQ credit blocking. The
+ * bits are mapped according to client ID (client IDs are defined in
+ * tx_arb_priority_client2): 0-management; 1-debug traffic from this port;
+ * 2-debug traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2
+ * traffic; 6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. Default value is
+ * 0 for not using WFQ credit blocking. */
+#define NIG_REG_P1_TX_ARB_CLIENT_IS_SUBJECT2WFQ			 0x18238
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_0			 0x18258
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_1			 0x1825c
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_2			 0x18260
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_3			 0x18264
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_4			 0x18268
+#define NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5			 0x186f4
+/* [RW 32] Specify the weight (in bytes) to be added to credit register 0
+ * when it is time to increment. */
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_0			 0x18244
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_1			 0x18248
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_2			 0x1824c
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_3			 0x18250
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_4			 0x18254
+#define NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5			 0x186f0
+/* [RW 12] Specify the number of strict priority arbitration slots between
+   two round-robin arbitration slots to avoid starvation. A value of 0 means
+   no strict priority cycles - the strict priority with anti-starvation
+   arbiter becomes a round-robin arbiter. */
+#define NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS			 0x18240
+/* [RW 32] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 31:0 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+   traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+   6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+   set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+   accommodate the 9 input clients to ETS arbiter. Note that this register
+   is the same as the one for port 0, except that port 1 only has COS 0-2
+   traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_LSB			 0x186e0
+/* [RW 4] Specify the client number to be assigned to each priority of the
+   strict priority arbiter. This register specifies bits 35:32 of the 36-bit
+   value. Priority 0 is the highest priority. Bits [3:0] are for priority 0
+   client; bits [35:32] are for priority 8 client. The clients are assigned
+   the following IDs: 0-management; 1-debug traffic from this port; 2-debug
+   traffic from other port; 3-COS0 traffic; 4-COS1 traffic; 5-COS2 traffic;
+   6-COS3 traffic; 7-COS4 traffic; 8-COS5 traffic. The reset value[35:0] is
+   set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
+   accommodate the 9 input clients to ETS arbiter. Note that this register
+   is the same as the one for port 0, except that port 1 only has COS 0-2
+   traffic. There is no traffic for COS 3-5 of port 1. */
+#define NIG_REG_P1_TX_ARB_PRIORITY_CLIENT2_MSB			 0x186e4
+/* [R 1] TX FIFO for transmitting data to MAC is empty. */
+#define NIG_REG_P1_TX_MACFIFO_EMPTY				 0x18594
+/* [R 1] FIFO empty status of the MCP TX FIFO used for storing MCP packets
+   forwarded to the host. */
+#define NIG_REG_P1_TX_MNG_HOST_FIFO_EMPTY			 0x182b8
+/* [RW 1] Pause enable for port0. This register may get 1 only when
+   ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable = 0 for the same
+   port */
+#define NIG_REG_PAUSE_ENABLE_0					 0x160c0
+#define NIG_REG_PAUSE_ENABLE_1					 0x160c4
+/* [RW 1] Input enable for RX PBF LP IF */
+#define NIG_REG_PBF_LB_IN_EN					 0x100b4
+/* [RW 1] Value of this register will be transmitted to port swap when
+   ~nig_registers_strap_override.strap_override =1 */
+#define NIG_REG_PORT_SWAP					 0x10394
+/* [RW 1] PPP enable for port0. This register may get 1 only when
+ * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable = 0 for the
+ * same port */
+#define NIG_REG_PPP_ENABLE_0					 0x160b0
+#define NIG_REG_PPP_ENABLE_1					 0x160b4
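+/* Illustrative sketch, not part of the register map: the comments above
+ * say pause, PPP and SAFC are mutually exclusive per port, so switching
+ * flow-control modes means clearing one enable before setting the other.
+ * REG_WR is assumed to be the driver's register write accessor. */
+static inline void nig_p0_select_ppp(struct bnx2x *bp)
+{
+        REG_WR(bp, NIG_REG_PAUSE_ENABLE_0, 0);  /* pause off first */
+        REG_WR(bp, NIG_REG_PPP_ENABLE_0, 1);    /* then PPP on */
+}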
+/* [RW 1] output enable for RX parser descriptor IF */
+#define NIG_REG_PRS_EOP_OUT_EN					 0x10104
+/* [RW 1] Input enable for RX parser request IF */
+#define NIG_REG_PRS_REQ_IN_EN					 0x100b8
+/* [RW 5] control to serdes - CL45 DEVAD */
+#define NIG_REG_SERDES0_CTRL_MD_DEVAD				 0x10370
+/* [RW 1] control to serdes; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_SERDES0_CTRL_MD_ST				 0x1036c
+/* [RW 5] control to serdes - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_SERDES0_CTRL_PHY_ADDR				 0x10374
+/* [R 1] status from serdes0 that inputs to interrupt logic of link status */
+#define NIG_REG_SERDES0_STATUS_LINK_STATUS			 0x10578
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_DISCARD				 0x105f0
+/* [R 32] Rx statistics : In user packets truncated due to BRB backpressure
+   for port0 */
+#define NIG_REG_STAT0_BRB_TRUNCATE				 0x105f8
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+   between 1024 and 1522 bytes for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT0				 0x10750
+/* [WB_R 36] Tx statistics : Number of packets from emac0 or bmac0 that are
+   1523 bytes and above for port0 */
+#define NIG_REG_STAT0_EGRESS_MAC_PKT1				 0x10760
+/* [R 32] Rx statistics : In user packets discarded due to BRB backpressure
+   for port1 */
+#define NIG_REG_STAT1_BRB_DISCARD				 0x10628
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+   between 1024 and 1522 bytes for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT0				 0x107a0
+/* [WB_R 36] Tx statistics : Number of packets from emac1 or bmac1 that are
+   1523 bytes and above for port1 */
+#define NIG_REG_STAT1_EGRESS_MAC_PKT1				 0x107b0
+/* [WB_R 64] Rx statistics : User octets received for LP */
+#define NIG_REG_STAT2_BRB_OCTET 				 0x107e0
+#define NIG_REG_STATUS_INTERRUPT_PORT0				 0x10328
+#define NIG_REG_STATUS_INTERRUPT_PORT1				 0x1032c
+/* [RW 1] port swap mux selection. If this register is equal to 0 then port
+   swap is equal to SPIO pin that inputs from ifmux_serdes_swap. If 1 then
+   port swap is equal to ~nig_registers_port_swap.port_swap */
+#define NIG_REG_STRAP_OVERRIDE					 0x10398
+/* [RW 1] output enable for RX_XCM0 IF */
+#define NIG_REG_XCM0_OUT_EN					 0x100f0
+/* [RW 1] output enable for RX_XCM1 IF */
+#define NIG_REG_XCM1_OUT_EN					 0x100f4
+/* [RW 1] control to xgxs - remote PHY in-band MDIO */
+#define NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST			 0x10348
+/* [RW 5] control to xgxs - CL45 DEVAD */
+#define NIG_REG_XGXS0_CTRL_MD_DEVAD				 0x1033c
+/* [RW 1] control to xgxs; 0 - clause 45; 1 - clause 22 */
+#define NIG_REG_XGXS0_CTRL_MD_ST				 0x10338
+/* [RW 5] control to xgxs - CL22 PHY_ADD and CL45 PRTAD */
+#define NIG_REG_XGXS0_CTRL_PHY_ADDR				 0x10340
+/* [R 1] status from xgxs0 that inputs to interrupt logic of link10g. */
+#define NIG_REG_XGXS0_STATUS_LINK10G				 0x10680
+/* [R 4] status from xgxs0 that inputs to interrupt logic of link status */
+#define NIG_REG_XGXS0_STATUS_LINK_STATUS			 0x10684
+/* [RW 2] selection for XGXS lane of port 0 in NIG_MUX block */
+#define NIG_REG_XGXS_LANE_SEL_P0				 0x102e8
+/* [RW 1] selection for port0 for NIG_MUX block : 0 = SerDes; 1 = XGXS */
+#define NIG_REG_XGXS_SERDES0_MODE_SEL				 0x102e0
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_EMAC0_MISC_MI_INT  (0x1<<0)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_SERDES0_LINK_STATUS (0x1<<9)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G	 (0x1<<15)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS  (0xf<<18)
+#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
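+/* Illustrative sketch, not part of the register map: extracting the 4-bit
+ * XGXS0 link status field from the port 0 status interrupt register using
+ * the mask and bit position above. REG_RD is assumed to be the driver's
+ * register read accessor. */
+static inline u32 nig_p0_xgxs0_link_status(struct bnx2x *bp)
+{
+        u32 sts = REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0);
+
+        return (sts & NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS)
+               >> NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE;
+}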
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_UPPER_BOUND				 0x15c05c
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 0. */
+#define PBF_REG_COS0_UPPER_BOUND_P0				 0x15c2cc
+/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter
+ * of port 1. */
+#define PBF_REG_COS0_UPPER_BOUND_P1				 0x15c2e4
+/* [RW 31] The weight of COS0 in the ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT					 0x15c054
+/* [RW 31] The weight of COS0 in port 0 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P0					 0x15c2a8
+/* [RW 31] The weight of COS0 in port 1 ETS command arbiter. */
+#define PBF_REG_COS0_WEIGHT_P1					 0x15c2c0
+/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_UPPER_BOUND				 0x15c060
+/* [RW 31] The weight of COS1 in the ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT					 0x15c058
+/* [RW 31] The weight of COS1 in port 0 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P0					 0x15c2ac
+/* [RW 31] The weight of COS1 in port 1 ETS command arbiter. */
+#define PBF_REG_COS1_WEIGHT_P1					 0x15c2c4
+/* [RW 31] The weight of COS2 in port 0 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P0					 0x15c2b0
+/* [RW 31] The weight of COS2 in port 1 ETS command arbiter. */
+#define PBF_REG_COS2_WEIGHT_P1					 0x15c2c8
+/* [RW 31] The weight of COS3 in port 0 ETS command arbiter. */
+#define PBF_REG_COS3_WEIGHT_P0					 0x15c2b4
+/* [RW 31] The weight of COS4 in port 0 ETS command arbiter. */
+#define PBF_REG_COS4_WEIGHT_P0					 0x15c2b8
+/* [RW 31] The weight of COS5 in port 0 ETS command arbiter. */
+#define PBF_REG_COS5_WEIGHT_P0					 0x15c2bc
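+/* Illustrative sketch, not part of the register map: a minimal two-COS
+ * ETS weight split on port 0 using the registers above. The 25/75 values
+ * are hypothetical example weights; REG_WR is assumed to be the driver's
+ * register write accessor. */
+static inline void pbf_p0_ets_weights_example(struct bnx2x *bp)
+{
+        REG_WR(bp, PBF_REG_COS0_WEIGHT_P0, 25);
+        REG_WR(bp, PBF_REG_COS1_WEIGHT_P0, 75);
+}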
+/* [R 11] Current credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_LB_Q					 0x140338
+/* [R 11] Current credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q0					 0x14033c
+/* [R 11] Current credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_CREDIT_Q1					 0x140340
+/* [RW 1] Disable processing further tasks from port 0 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P0			 0x14005c
+/* [RW 1] Disable processing further tasks from port 1 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P1			 0x140060
+/* [RW 1] Disable processing further tasks from port 4 (after ending the
+   current task in process). */
+#define PBF_REG_DISABLE_NEW_TASK_PROC_P4			 0x14006c
+#define PBF_REG_DISABLE_PF					 0x1402e8
+/* [RW 18] For port 0: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P0			 0x15c288
+/* [RW 9] For port 1: For each client that is subject to WFQ (the
+ * corresponding bit is 1); indicates to which of the credit registers this
+ * client is mapped. For clients which are not credit blocked; their mapping
+ * is don't care. */
+#define PBF_REG_ETS_ARB_CLIENT_CREDIT_MAP_P1			 0x15c28c
+/* [RW 6] For port 0: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P0			 0x15c278
+/* [RW 3] For port 1: Bit per client to indicate if the client competes in
+ * the strict priority arbiter directly (corresponding bit = 1); or first
+ * goes to the RR arbiter (corresponding bit = 0); and then competes in the
+ * lowest priority in the strict-priority arbiter. */
+#define PBF_REG_ETS_ARB_CLIENT_IS_STRICT_P1			 0x15c27c
+/* [RW 6] For port 0: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P0		 0x15c280
+/* [RW 3] For port 1: Bit per client to indicate if the client is subject to
+ * WFQ credit blocking (corresponding bit = 1). */
+#define PBF_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ_P1		 0x15c284
+/* [RW 16] For port 0: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is an RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P0			 0x15c2a0
+/* [RW 16] For port 1: The number of strict priority arbitration slots
+ * between 2 RR arbitration slots. A value of 0 means no strict priority
+ * cycles; i.e. the strict-priority w/ anti-starvation arbiter is an RR
+ * arbiter. */
+#define PBF_REG_ETS_ARB_NUM_STRICT_ARB_SLOTS_P1			 0x15c2a4
+/* [RW 18] For port 0: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; the RR arbiter output is connected to the
+ * lowest priority (this is not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P0			 0x15c270
+/* [RW 9] For port 1: Indicates which client is connected to each priority
+ * in the strict-priority arbiter. Priority 0 is the highest priority, and
+ * priority 5 is the lowest; the RR arbiter output is connected to the
+ * lowest priority (this is not configurable). */
+#define PBF_REG_ETS_ARB_PRIORITY_CLIENT_P1			 0x15c274
+/* [RW 1] Indicates that ETS is performed between the COSes in the command
+ * arbiter. If reset strict priority w/ anti-starvation will be performed
+ * w/o WFQ. */
+#define PBF_REG_ETS_ENABLED					 0x15c050
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PBF_REG_HDRS_AFTER_BASIC				 0x15c0a8
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PBF_REG_HDRS_AFTER_TAG_0				 0x15c0b8
+/* [R 1] Removed for E3 B0 - Indicates which COS is connected to the highest
+ * priority in the command arbiter. */
+#define PBF_REG_HIGH_PRIORITY_COS_NUM				 0x15c04c
+#define PBF_REG_IF_ENABLE_REG					 0x140044
+/* [RW 1] Init bit. When set the initial credits are copied to the credit
+   registers (except the port credits). Should be set and then reset after
+   the configuration of the block has ended. */
+#define PBF_REG_INIT						 0x140000
+/* [RW 11] Initial credit for the LB queue in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_LB_Q					 0x15c248
+/* [RW 11] Initial credit for queue 0 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q0					 0x15c230
+/* [RW 11] Initial credit for queue 1 in the tx port buffers in 16 byte
+ * lines. */
+#define PBF_REG_INIT_CRD_Q1					 0x15c234
+/* [RW 1] Init bit for port 0. When set the initial credit of port 0 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P0 					 0x140004
+/* [RW 1] Init bit for port 1. When set the initial credit of port 1 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P1 					 0x140008
+/* [RW 1] Init bit for port 4. When set the initial credit of port 4 is
+   copied to the credit register. Should be set and then reset after the
+   configuration of the port has ended. */
+#define PBF_REG_INIT_P4 					 0x14000c
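+/* Illustrative sketch, not part of the register map: the init bits above
+ * are documented as "set and then reset" once configuration ends, i.e.
+ * they are pulsed rather than left set. REG_WR is assumed to be the
+ * driver's register write accessor. */
+static inline void pbf_pulse_init_p0(struct bnx2x *bp)
+{
+        REG_WR(bp, PBF_REG_INIT_P0, 1); /* copy the initial credit */
+        REG_WR(bp, PBF_REG_INIT_P0, 0); /* then release the init bit */
+}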
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * the LB queue. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q			 0x140354
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 0. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q0			 0x140358
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * queue 1. Reset upon init. */
+#define PBF_REG_INTERNAL_CRD_FREED_CNT_Q1			 0x14035c
+/* [RW 1] Enable for mac interface 0. */
+#define PBF_REG_MAC_IF0_ENABLE					 0x140030
+/* [RW 1] Enable for mac interface 1. */
+#define PBF_REG_MAC_IF1_ENABLE					 0x140034
+/* [RW 1] Enable for the loopback interface. */
+#define PBF_REG_MAC_LB_ENABLE					 0x140040
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PBF_REG_MUST_HAVE_HDRS					 0x15c0c4
+/* [RW 16] The number of strict priority arbitration slots between 2 RR
+ * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
+ * strict-priority w/ anti-starvation arbiter is an RR arbiter. */
+#define PBF_REG_NUM_STRICT_ARB_SLOTS				 0x15c064
+/* [RW 10] Port 0 threshold used by the arbiter (in 16 byte lines) when
+   pause is not supported. */
+#define PBF_REG_P0_ARB_THRSH					 0x1400e4
+/* [R 11] Current credit for port 0 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P0_CREDIT					 0x140200
+/* [RW 11] Initial credit for port 0 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P0_INIT_CRD					 0x1400d0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 0. Reset upon init. */
+#define PBF_REG_P0_INTERNAL_CRD_FREED_CNT			 0x140308
+/* [R 1] Removed for E3 B0 - Indication that pause is enabled for port 0. */
+#define PBF_REG_P0_PAUSE_ENABLE					 0x140014
+/* [R 8] Removed for E3 B0 - Number of tasks in port 0 task queue. */
+#define PBF_REG_P0_TASK_CNT					 0x140204
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 0. Reset upon init. */
+#define PBF_REG_P0_TQ_LINES_FREED_CNT				 0x1402f0
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 0. */
+#define PBF_REG_P0_TQ_OCCUPANCY					 0x1402fc
+/* [R 11] Removed for E3 B0 - Current credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_CREDIT					 0x140208
+/* [R 11] Removed for E3 B0 - Initial credit for port 1 in the tx port
+ * buffers in 16 byte lines. */
+#define PBF_REG_P1_INIT_CRD					 0x1400d4
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 1. Reset upon init. */
+#define PBF_REG_P1_INTERNAL_CRD_FREED_CNT			 0x14030c
+/* [R 8] Removed for E3 B0 - Number of tasks in port 1 task queue. */
+#define PBF_REG_P1_TASK_CNT					 0x14020c
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 1. Reset upon init. */
+#define PBF_REG_P1_TQ_LINES_FREED_CNT				 0x1402f4
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 1. */
+#define PBF_REG_P1_TQ_OCCUPANCY					 0x140300
+/* [R 11] Current credit for port 4 in the tx port buffers in 16 byte lines. */
+#define PBF_REG_P4_CREDIT					 0x140210
+/* [RW 11] Initial credit for port 4 in the tx port buffers in 16 byte
+   lines. */
+#define PBF_REG_P4_INIT_CRD					 0x1400e0
+/* [R 32] Cyclic counter for the amount of credits in 16 byte lines added for
+ * port 4. Reset upon init. */
+#define PBF_REG_P4_INTERNAL_CRD_FREED_CNT			 0x140310
+/* [R 8] Removed for E3 B0 - Number of tasks in port 4 task queue. */
+#define PBF_REG_P4_TASK_CNT					 0x140214
+/* [R 32] Removed for E3 B0 - Cyclic counter for number of 8 byte lines
+ * freed from the task queue of port 4. Reset upon init. */
+#define PBF_REG_P4_TQ_LINES_FREED_CNT				 0x1402f8
+/* [R 12] Number of 8 byte lines occupied in the task queue of port 4. */
+#define PBF_REG_P4_TQ_OCCUPANCY					 0x140304
+/* [RW 5] Interrupt mask register #0 read/write */
+#define PBF_REG_PBF_INT_MASK					 0x1401d4
+/* [R 5] Interrupt register #0 read */
+#define PBF_REG_PBF_INT_STS					 0x1401c8
+/* [RW 20] Parity mask register #0 read/write */
+#define PBF_REG_PBF_PRTY_MASK					 0x1401e4
+/* [RC 20] Parity register #0 read clear */
+#define PBF_REG_PBF_PRTY_STS_CLR				 0x1401dc
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PBF_REG_TAG_ETHERTYPE_0					 0x15c090
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PBF_REG_TAG_LEN_0					 0x15c09c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the LB task
+ * queue. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_LB_Q				 0x14038c
+/* [R 32] Cyclic counter for number of 8 byte lines freed from the task
+ * queue 0. Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q0				 0x140390
+/* [R 32] Cyclic counter for number of 8 byte lines freed from task queue 1.
+ * Reset upon init. */
+#define PBF_REG_TQ_LINES_FREED_CNT_Q1				 0x140394
+/* [R 13] Number of 8 byte lines occupied in the task queue of the LB
+ * queue. */
+#define PBF_REG_TQ_OCCUPANCY_LB_Q				 0x1403a8
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 0. */
+#define PBF_REG_TQ_OCCUPANCY_Q0					 0x1403ac
+/* [R 13] Number of 8 byte lines occupied in the task queue of queue 1. */
+#define PBF_REG_TQ_OCCUPANCY_Q1					 0x1403b0
+#define PB_REG_CONTROL						 0
+/* [RW 2] Interrupt mask register #0 read/write */
+#define PB_REG_PB_INT_MASK					 0x28
+/* [R 2] Interrupt register #0 read */
+#define PB_REG_PB_INT_STS					 0x1c
+/* [RW 4] Parity mask register #0 read/write */
+#define PB_REG_PB_PRTY_MASK					 0x38
+/* [R 4] Parity register #0 read */
+#define PB_REG_PB_PRTY_STS					 0x2c
+/* [RC 4] Parity register #0 read clear */
+#define PB_REG_PB_PRTY_STS_CLR					 0x30
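+/* Illustrative sketch, not part of the register map: [RC] registers such
+ * as the parity status-clear above are read-to-clear - one read returns
+ * the latched bits and clears them. The PB offsets are block relative, so
+ * a hypothetical block base is added; REG_RD is assumed to be the
+ * driver's register read accessor. */
+static inline u32 pb_read_clear_parity(struct bnx2x *bp, u32 pb_base)
+{
+        return REG_RD(bp, pb_base + PB_REG_PB_PRTY_STS_CLR);
+}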
+#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR		 (0x1<<0)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW	 (0x1<<8)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR	 (0x1<<1)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN		 (0x1<<6)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN	 (0x1<<7)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN  (0x1<<4)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN	 (0x1<<3)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN	 (0x1<<5)
+#define PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN		 (0x1<<2)
+/* [R 8] Config space A attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space A attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_a_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_A_REQUEST			 0x9010
+/* [R 8] Config space B attention dirty bits. Each bit indicates that the
+ * corresponding PF generates config space B attention. Set by PXP. Reset by
+ * MCP writing 1 to icfg_space_b_request_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_CFG_SPACE_B_REQUEST			 0x9014
+/* [RW 1] Type A PF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_A_PF_ENABLE			 0x9194
+/* [RW 18] Type B VF inbound interrupt table for CSDM: bits[17:9]-mask;
+ * bits[8:0]-address. Bits [1:0] must be zero (DW resolution address). */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF				 0x916c
+/* [RW 1] Type B VF enable inbound interrupt table for CSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_CSDM_INB_INT_B_VF_ENABLE			 0x919c
+/* [RW 16] Start offset of CSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_A			 0x9100
+/* [RW 16] Start offset of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_START_OFFSET_B			 0x9108
+/* [RW 5] VF Shift of CSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_CSDM_VF_SHIFT_B				 0x9110
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_CSDM_ZONE_A_SIZE_PF			 0x91ac
+/* [R 8] FLR request attention dirty bits for PFs 0 to 7. Each bit indicates
+ * that the FLR register of the corresponding PF was set. Set by PXP. Reset
+ * by MCP writing 1 to flr_request_pf_7_0_clr. Note: register contains bits
+ * from both paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0				 0x9028
+/* [W 8] FLR request attention dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR			 0x9418
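+/* Illustrative sketch, not part of the register map: the *_CLR registers
+ * above are write-1-to-clear companions of the dirty-bit registers, so
+ * acknowledging PF n's FLR request means writing only that bit. REG_WR is
+ * assumed to be the driver's register write accessor. */
+static inline void pglue_b_ack_flr_pf(struct bnx2x *bp, int pf)
+{
+        REG_WR(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR, 1 << pf);
+}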
+/* [R 32] FLR request attention dirty bits for VFs 96 to 127. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_127_96_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_127_96			 0x9024
+/* [R 32] FLR request attention dirty bits for VFs 0 to 31. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_31_0_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_31_0			 0x9018
+/* [R 32] FLR request attention dirty bits for VFs 32 to 63. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_63_32_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_63_32			 0x901c
+/* [R 32] FLR request attention dirty bits for VFs 64 to 95. Each bit
+ * indicates that the FLR register of the corresponding VF was set. Set by
+ * PXP. Reset by MCP writing 1 to flr_request_vf_95_64_clr. */
+#define PGLUE_B_REG_FLR_REQUEST_VF_95_64			 0x9020
+/* [R 8] Each bit indicates an incorrect behavior in user RX interface. Bit
+ * 0 - Target memory read arrived with a correctable error. Bit 1 - Target
+ * memory read arrived with an uncorrectable error. Bit 2 - Configuration RW
+ * arrived with a correctable error. Bit 3 - Configuration RW arrived with
+ * an uncorrectable error. Bit 4 - Completion with Configuration Request
+ * Retry Status. Bit 5 - Expansion ROM access received with a write request.
+ * Bit 6 - Completion with pcie_rx_err of 0000; CMPL_STATUS of non-zero; and
+ * pcie_rx_last not asserted. Bit 7 - Completion with pcie_rx_err of 1010;
+ * and pcie_rx_last not asserted. */
+#define PGLUE_B_REG_INCORRECT_RCV_DETAILS			 0x9068
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER		 0x942c
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ		 0x9430
+#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_WRITE		 0x9434
+#define PGLUE_B_REG_INTERNAL_VFID_ENABLE			 0x9438
+/* [R 9] Interrupt register #0 read */
+#define PGLUE_B_REG_PGLUE_B_INT_STS				 0x9298
+/* [RC 9] Interrupt register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_INT_STS_CLR			 0x929c
+/* [RW 2] Parity mask register #0 read/write */
+#define PGLUE_B_REG_PGLUE_B_PRTY_MASK				 0x92b4
+/* [R 2] Parity register #0 read */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS				 0x92a8
+/* [RC 2] Parity register #0 read clear */
+#define PGLUE_B_REG_PGLUE_B_PRTY_STS_CLR			 0x92ac
+/* [R 13] Details of first request received with error. [2:0] - PFID. [3] -
+ * VF_VALID. [9:4] - VFID. [11:10] - Error Code - 0 - Indicates Completion
+ * Timeout of a User Tx non-posted request. 1 - unsupported request. 2 -
+ * completer abort. 3 - Illegal value for this field. [12] valid - indicates
+ * if there was a completion error since the last time this register was
+ * cleared. */
+#define PGLUE_B_REG_RX_ERR_DETAILS				 0x9080
+/* [R 18] Details of first ATS Translation Completion request received with
+ * error. [2:0] - PFID. [3] - VF_VALID. [9:4] - VFID. [11:10] - Error Code -
+ * 0 - Indicates Completion Timeout of a User Tx non-posted request. 1 -
+ * unsupported request. 2 - completer abort. 3 - Illegal value for this
+ * field. [16:12] - ATC OTB EntryID. [17] valid - indicates if there was a
+ * completion error since the last time this register was cleared. */
+#define PGLUE_B_REG_RX_TCPL_ERR_DETAILS			 0x9084
+/* [W 8] Debug only - Shadow BME bits clear for PFs 0 to 7. MCP writes 1 to
+ * a bit in this register in order to clear the corresponding bit in
+ * shadow_bme_pf_7_0 register. MCP should never use this unless a
+ * work-around is needed. Note: register contains bits from both paths. */
+#define PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR			 0x9458
+/* [R 8] SR IOV disabled attention dirty bits. Each bit indicates that the
+ * VF enable register of the corresponding PF is written to 0 and was
+ * previously 1. Set by PXP. Reset by MCP writing 1 to
+ * sr_iov_disabled_request_clr. Note: register contains bits from both
+ * paths. */
+#define PGLUE_B_REG_SR_IOV_DISABLED_REQUEST			 0x9030
+/* [R 32] Indicates the status of tags 32-63. 0 - tag is used - read
+ * completion did not return yet. 1 - tag is unused. Same functionality as
+ * pxp2_registers_pgl_exp_rom_data2 for tags 0-31. */
+#define PGLUE_B_REG_TAGS_63_32					 0x9244
+/* [RW 1] Type A PF enable inbound interrupt table for TSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_TSDM_INB_INT_A_PF_ENABLE			 0x9170
+/* [RW 16] Start offset of TSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_A			 0x90c4
+/* [RW 16] Start offset of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_START_OFFSET_B			 0x90cc
+/* [RW 5] VF Shift of TSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_TSDM_VF_SHIFT_B				 0x90d4
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_TSDM_ZONE_A_SIZE_PF			 0x91a0
+/* [R 32] Address [31:0] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0				 0x9098
+/* [R 32] Address [63:32] of first read request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32			 0x909c
+/* [R 31] Details of first read request not submitted due to error. [4:0]
+ * VQID. [5] TREQ. 1 - Indicates the request is a Translation Request.
+ * [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25] -
+ * VFID. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS				 0x90a0
+/* [R 26] Details of first read request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_RD_DETAILS2				 0x90a4
+/* [R 32] Address [31:0] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0				 0x9088
+/* [R 32] Address [63:32] of first write request not submitted due to error */
+#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32			 0x908c
+/* [R 31] Details of first write request not submitted due to error. [4:0]
+ * VQID. [20:8] - Length in bytes. [23:21] - PFID. [24] - VF_VALID. [30:25]
+ * - VFID. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS				 0x9090
+/* [R 26] Details of first write request not submitted due to error. [15:0]
+ * Request ID. [19:16] client ID. [20] - last SR. [24:21] - Error type -
+ * [21] - Indicates was_error was set; [22] - Indicates BME was cleared;
+ * [23] - Indicates FID_enable was cleared; [24] - Indicates VF with parent
+ * PF FLR_request or IOV_disable_request dirty bit is set. [25] valid -
+ * indicates if there was a request not submitted due to error since the
+ * last time this register was cleared. */
+#define PGLUE_B_REG_TX_ERR_WR_DETAILS2				 0x9094
+/* [RW 10] Type A PF/VF inbound interrupt table for USDM: bits[9:5]-mask;
+ * bits[4:0]-address relative to start_offset_a. Bits [1:0] can have any
+ * value (Byte resolution address). */
+#define PGLUE_B_REG_USDM_INB_INT_A_0				 0x9128
+#define PGLUE_B_REG_USDM_INB_INT_A_1				 0x912c
+#define PGLUE_B_REG_USDM_INB_INT_A_2				 0x9130
+#define PGLUE_B_REG_USDM_INB_INT_A_3				 0x9134
+#define PGLUE_B_REG_USDM_INB_INT_A_4				 0x9138
+#define PGLUE_B_REG_USDM_INB_INT_A_5				 0x913c
+#define PGLUE_B_REG_USDM_INB_INT_A_6				 0x9140
+/* [RW 1] Type A PF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_PF_ENABLE			 0x917c
+/* [RW 1] Type A VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_A_VF_ENABLE			 0x9180
+/* [RW 1] Type B VF enable inbound interrupt table for USDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_USDM_INB_INT_B_VF_ENABLE			 0x9184
+/* [RW 16] Start offset of USDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_A			 0x90d8
+/* [RW 16] Start offset of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_START_OFFSET_B			 0x90e0
+/* [RW 5] VF Shift of USDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_USDM_VF_SHIFT_B				 0x90e8
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_USDM_ZONE_A_SIZE_PF			 0x91a4
+/* [R 26] Details of first target VF request accessing VF GRC space that
+ * failed permission check. [14:0] Address. [15] w_nr: 0 - Read; 1 - Write.
+ * [21:16] VFID. [24:22] - PFID. [25] valid - indicates if there was a
+ * request accessing VF GRC space that failed permission check since the
+ * last time this register was cleared. Permission checks are: function
+ * permission; R/W permission; address range permission. */
+#define PGLUE_B_REG_VF_GRC_SPACE_VIOLATION_DETAILS		 0x9234
+/* [R 31] Details of first target VF request with length violation (too many
+ * DWs) accessing BAR0. [12:0] Address in DWs (bits [14:2] of byte address).
+ * [14:13] BAR. [20:15] VFID. [23:21] - PFID. [29:24] - Length in DWs. [30]
+ * valid - indicates if there was a request with length violation since the
+ * last time this register was cleared. Length violations: length of more
+ * than 2DWs; length of 2DWs and address not QW aligned; window is GRC and
+ * length is more than 1 DW. */
+#define PGLUE_B_REG_VF_LENGTH_VIOLATION_DETAILS		 0x9230
+/* [R 8] Was_error indication dirty bits for PFs 0 to 7. Each bit indicates
+ * that there was a completion with uncorrectable error for the
+ * corresponding PF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_pf_7_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0				 0x907c
+/* [W 8] Was_error indication dirty bits clear for PFs 0 to 7. MCP writes 1
+ * to a bit in this register in order to clear the corresponding bit in
+ * flr_request_pf_7_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR			 0x9470
+/* [R 32] Was_error indication dirty bits for VFs 96 to 127. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_127_96_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96			 0x9078
+/* [W 32] Was_error indication dirty bits clear for VFs 96 to 127. MCP
+ * writes 1 to a bit in this register in order to clear the corresponding
+ * bit in was_error_vf_127_96 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_127_96_CLR			 0x9474
+/* [R 32] Was_error indication dirty bits for VFs 0 to 31. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_31_0_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0				 0x906c
+/* [W 32] Was_error indication dirty bits clear for VFs 0 to 31. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_31_0 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR			 0x9478
+/* [R 32] Was_error indication dirty bits for VFs 32 to 63. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_63_32_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32				 0x9070
+/* [W 32] Was_error indication dirty bits clear for VFs 32 to 63. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_63_32 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_63_32_CLR			 0x947c
+/* [R 32] Was_error indication dirty bits for VFs 64 to 95. Each bit
+ * indicates that there was a completion with uncorrectable error for the
+ * corresponding VF. Set by PXP. Reset by MCP writing 1 to
+ * was_error_vf_95_64_clr. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64				 0x9074
+/* [W 32] Was_error indication dirty bits clear for VFs 64 to 95. MCP writes
+ * 1 to a bit in this register in order to clear the corresponding bit in
+ * was_error_vf_95_64 register. */
+#define PGLUE_B_REG_WAS_ERROR_VF_95_64_CLR			 0x9480
+/* [RW 1] Type A PF enable inbound interrupt table for XSDM. 0 - disable; 1
+ * - enable. */
+#define PGLUE_B_REG_XSDM_INB_INT_A_PF_ENABLE			 0x9188
+/* [RW 16] Start offset of XSDM zone A (queue zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_A			 0x90ec
+/* [RW 16] Start offset of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_START_OFFSET_B			 0x90f4
+/* [RW 5] VF Shift of XSDM zone B (legacy zone) in the internal RAM */
+#define PGLUE_B_REG_XSDM_VF_SHIFT_B				 0x90fc
+/* [RW 1] 0 - Zone A size is 136x32B; 1 - Zone A size is 152x32B. */
+#define PGLUE_B_REG_XSDM_ZONE_A_SIZE_PF			 0x91a8
+#define PRS_REG_A_PRSU_20					 0x40134
+/* [R 8] debug only: CFC load request current credit. Transaction based. */
+#define PRS_REG_CFC_LD_CURRENT_CREDIT				 0x40164
+/* [R 8] debug only: CFC search request current credit. Transaction based. */
+#define PRS_REG_CFC_SEARCH_CURRENT_CREDIT			 0x40168
+/* [RW 6] The initial credit for the search message to the CFC interface.
+   Credit is transaction based. */
+#define PRS_REG_CFC_SEARCH_INITIAL_CREDIT			 0x4011c
+/* [RW 24] CID for port 0 if no match */
+#define PRS_REG_CID_PORT_0					 0x400fc
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+   load response is reset and packet type is 0. Used in packet start message
+   to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_0			 0x400dc
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_1			 0x400e0
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_2			 0x400e4
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_3			 0x400e8
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_4			 0x400ec
+#define PRS_REG_CM_HDR_FLUSH_LOAD_TYPE_5			 0x400f0
+/* [RW 32] The CM header for flush message where 'load existed' bit in CFC
+   load response is set and packet type is 0. Used in packet start message
+   to TCM. */
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_0			 0x400bc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_1			 0x400c0
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_2			 0x400c4
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_3			 0x400c8
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_4			 0x400cc
+#define PRS_REG_CM_HDR_FLUSH_NO_LOAD_TYPE_5			 0x400d0
+/* [RW 32] The CM header for a match and packet type 1 for loopback port.
+   Used in packet start message to TCM. */
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_1				 0x4009c
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_2				 0x400a0
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_3				 0x400a4
+#define PRS_REG_CM_HDR_LOOPBACK_TYPE_4				 0x400a8
+/* [RW 32] The CM header for a match and packet type 0. Used in packet start
+   message to TCM. */
+#define PRS_REG_CM_HDR_TYPE_0					 0x40078
+#define PRS_REG_CM_HDR_TYPE_1					 0x4007c
+#define PRS_REG_CM_HDR_TYPE_2					 0x40080
+#define PRS_REG_CM_HDR_TYPE_3					 0x40084
+#define PRS_REG_CM_HDR_TYPE_4					 0x40088
+/* [RW 32] The CM header in case there was not a match on the connection */
+#define PRS_REG_CM_NO_MATCH_HDR 				 0x400b8
+/* [RW 1] Indicates if in e1hov mode. 0=non-e1hov mode; 1=e1hov mode. */
+#define PRS_REG_E1HOV_MODE					 0x401c8
+/* [RW 8] The 8-bit event ID for a match and packet type 1. Used in packet
+   start message to TCM. */
+#define PRS_REG_EVENT_ID_1					 0x40054
+#define PRS_REG_EVENT_ID_2					 0x40058
+#define PRS_REG_EVENT_ID_3					 0x4005c
+/* [RW 16] The Ethernet type value for FCoE */
+#define PRS_REG_FCOE_TYPE					 0x401d0
+/* [RW 8] Context region for flush packet with packet type 0. Used in CFC
+   load request message. */
+#define PRS_REG_FLUSH_REGIONS_TYPE_0				 0x40004
+#define PRS_REG_FLUSH_REGIONS_TYPE_1				 0x40008
+#define PRS_REG_FLUSH_REGIONS_TYPE_2				 0x4000c
+#define PRS_REG_FLUSH_REGIONS_TYPE_3				 0x40010
+#define PRS_REG_FLUSH_REGIONS_TYPE_4				 0x40014
+#define PRS_REG_FLUSH_REGIONS_TYPE_5				 0x40018
+#define PRS_REG_FLUSH_REGIONS_TYPE_6				 0x4001c
+#define PRS_REG_FLUSH_REGIONS_TYPE_7				 0x40020
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header. */
+#define PRS_REG_HDRS_AFTER_BASIC				 0x40238
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
+ * Ethernet header for port 0 packets. */
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_0				 0x40270
+#define PRS_REG_HDRS_AFTER_BASIC_PORT_1				 0x40290
+/* [R 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 */
+#define PRS_REG_HDRS_AFTER_TAG_0				 0x40248
+/* [RW 6] Bit-map indicating which L2 hdrs may appear after L2 tag 0 for
+ * port 0 packets */
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_0				 0x40280
+#define PRS_REG_HDRS_AFTER_TAG_0_PORT_1				 0x402a0
+/* [RW 4] The increment value to send in the CFC load request message */
+#define PRS_REG_INC_VALUE					 0x40048
+/* [RW 6] Bit-map indicating which headers must appear in the packet */
+#define PRS_REG_MUST_HAVE_HDRS					 0x40254
+/* [RW 6] Bit-map indicating which headers must appear in the packet for
+ * port 0 packets */
+#define PRS_REG_MUST_HAVE_HDRS_PORT_0				 0x4028c
+#define PRS_REG_MUST_HAVE_HDRS_PORT_1				 0x402ac
+#define PRS_REG_NIC_MODE					 0x40138
+/* [RW 8] The 8-bit event ID for cases where there is no match on the
+   connection. Used in packet start message to TCM. */
+#define PRS_REG_NO_MATCH_EVENT_ID				 0x40070
+/* [ST 24] The number of input CFC flush packets */
+#define PRS_REG_NUM_OF_CFC_FLUSH_MESSAGES			 0x40128
+/* [ST 32] The number of cycles the Parser halted its operation since it
+   could not allocate the next serial number */
+#define PRS_REG_NUM_OF_DEAD_CYCLES				 0x40130
+/* [ST 24] The number of input packets */
+#define PRS_REG_NUM_OF_PACKETS					 0x40124
+/* [ST 24] The number of input transparent flush packets */
+#define PRS_REG_NUM_OF_TRANSPARENT_FLUSH_MESSAGES		 0x4012c
+/* [RW 8] Context region for received Ethernet packet with a match and
+   packet type 0. Used in CFC load request message */
+#define PRS_REG_PACKET_REGIONS_TYPE_0				 0x40028
+#define PRS_REG_PACKET_REGIONS_TYPE_1				 0x4002c
+#define PRS_REG_PACKET_REGIONS_TYPE_2				 0x40030
+#define PRS_REG_PACKET_REGIONS_TYPE_3				 0x40034
+#define PRS_REG_PACKET_REGIONS_TYPE_4				 0x40038
+#define PRS_REG_PACKET_REGIONS_TYPE_5				 0x4003c
+#define PRS_REG_PACKET_REGIONS_TYPE_6				 0x40040
+#define PRS_REG_PACKET_REGIONS_TYPE_7				 0x40044
+/* [R 2] debug only: Number of pending requests for CAC on port 0. */
+#define PRS_REG_PENDING_BRB_CAC0_RQ				 0x40174
+/* [R 2] debug only: Number of pending requests for header parsing. */
+#define PRS_REG_PENDING_BRB_PRS_RQ				 0x40170
+/* [R 1] Interrupt register #0 read */
+#define PRS_REG_PRS_INT_STS					 0x40188
+/* [RW 8] Parity mask register #0 read/write */
+#define PRS_REG_PRS_PRTY_MASK					 0x401a4
+/* [R 8] Parity register #0 read */
+#define PRS_REG_PRS_PRTY_STS					 0x40198
+/* [RC 8] Parity register #0 read clear */
+#define PRS_REG_PRS_PRTY_STS_CLR				 0x4019c
+/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
+   request message */
+#define PRS_REG_PURE_REGIONS					 0x40024
+/* [R 32] debug only: Serial number status lsb 32 bits. '1' indicates this
+   serial number was released by SDM but cannot be used because a previous
+   serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_LSB				 0x40154
+/* [R 32] debug only: Serial number status msb 32 bits. '1' indicates this
+   serial number was released by SDM but cannot be used because a previous
+   serial number was not released. */
+#define PRS_REG_SERIAL_NUM_STATUS_MSB				 0x40158
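+/* Illustrative sketch (not part of the original patch): the two status
+ * words above can be combined into one 64-bit tag vector for debugging.
+ * Assumes u32/u64 from <linux/types.h>; 'lsb' and 'msb' are the raw
+ * values read from the two registers above. */
+static inline u64 prs_serial_num_status(u32 lsb, u32 msb)
+{
+	/* bit i set: serial number i was released by SDM but is still
+	 * blocked behind an earlier, unreleased serial number */
+	return ((u64)msb << 32) | lsb;
+}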
+/* [R 4] debug only: SRC current credit. Transaction based. */
+#define PRS_REG_SRC_CURRENT_CREDIT				 0x4016c
+/* [RW 16] The Ethernet type value for L2 tag 0 */
+#define PRS_REG_TAG_ETHERTYPE_0					 0x401d4
+/* [RW 4] The length of the info field for L2 tag 0. The length is between
+ * 2B and 14B; in 2B granularity */
+#define PRS_REG_TAG_LEN_0					 0x4022c
+/* [R 8] debug only: TCM current credit. Cycle based. */
+#define PRS_REG_TCM_CURRENT_CREDIT				 0x40160
+/* [R 8] debug only: TSDM current credit. Transaction based. */
+#define PRS_REG_TSDM_CURRENT_CREDIT				 0x4015c
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT			 (0x1<<19)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF			 (0x1<<20)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN			 (0x1<<22)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED		 (0x1<<23)
+#define PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED		 (0x1<<24)
+#define PXP2_PXP2_INT_STS_0_REG_WR_PGLUE_EOP_ERROR		 (0x1<<7)
+#define PXP2_PXP2_INT_STS_CLR_0_REG_WR_PGLUE_EOP_ERROR		 (0x1<<7)
+/* [R 6] Debug only: Number of used entries in the data FIFO */
+#define PXP2_REG_HST_DATA_FIFO_STATUS				 0x12047c
+/* [R 7] Debug only: Number of used entries in the header FIFO */
+#define PXP2_REG_HST_HEADER_FIFO_STATUS 			 0x120478
+#define PXP2_REG_PGL_ADDR_88_F0 				 0x120534
+#define PXP2_REG_PGL_ADDR_8C_F0 				 0x120538
+#define PXP2_REG_PGL_ADDR_90_F0 				 0x12053c
+#define PXP2_REG_PGL_ADDR_94_F0 				 0x120540
+#define PXP2_REG_PGL_CONTROL0					 0x120490
+#define PXP2_REG_PGL_CONTROL1					 0x120514
+#define PXP2_REG_PGL_DEBUG					 0x120520
+/* [RW 32] Third dword data of expansion rom request. This register is
+   special: reading from it provides a vector of outstanding read requests;
+   if a bit is zero it means that a read request on the corresponding tag
+   did not finish yet (not all completions have arrived for it) */
+#define PXP2_REG_PGL_EXP_ROM2					 0x120808
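+/* Illustrative sketch (not part of the original patch): per the comment
+ * above, a zero bit in this register means the read request on the
+ * corresponding tag has not finished. Assumes u32/bool from
+ * <linux/types.h>; 'vec' is the raw register value and 'tag' an assumed
+ * 0-31 tag index. */
+static inline bool pglue_read_tag_done(u32 vec, unsigned int tag)
+{
+	return (vec >> tag) & 1;	/* 1 - all completions arrived */
+}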
+/* [RW 32] Inbound interrupt table for CSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_CSDM_0 				 0x1204f4
+#define PXP2_REG_PGL_INT_CSDM_1 				 0x1204f8
+#define PXP2_REG_PGL_INT_CSDM_2 				 0x1204fc
+#define PXP2_REG_PGL_INT_CSDM_3 				 0x120500
+#define PXP2_REG_PGL_INT_CSDM_4 				 0x120504
+#define PXP2_REG_PGL_INT_CSDM_5 				 0x120508
+#define PXP2_REG_PGL_INT_CSDM_6 				 0x12050c
+#define PXP2_REG_PGL_INT_CSDM_7 				 0x120510
+/* [RW 32] Inbound interrupt table for TSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_TSDM_0 				 0x120494
+#define PXP2_REG_PGL_INT_TSDM_1 				 0x120498
+#define PXP2_REG_PGL_INT_TSDM_2 				 0x12049c
+#define PXP2_REG_PGL_INT_TSDM_3 				 0x1204a0
+#define PXP2_REG_PGL_INT_TSDM_4 				 0x1204a4
+#define PXP2_REG_PGL_INT_TSDM_5 				 0x1204a8
+#define PXP2_REG_PGL_INT_TSDM_6 				 0x1204ac
+#define PXP2_REG_PGL_INT_TSDM_7 				 0x1204b0
+/* [RW 32] Inbound interrupt table for USDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_USDM_0 				 0x1204b4
+#define PXP2_REG_PGL_INT_USDM_1 				 0x1204b8
+#define PXP2_REG_PGL_INT_USDM_2 				 0x1204bc
+#define PXP2_REG_PGL_INT_USDM_3 				 0x1204c0
+#define PXP2_REG_PGL_INT_USDM_4 				 0x1204c4
+#define PXP2_REG_PGL_INT_USDM_5 				 0x1204c8
+#define PXP2_REG_PGL_INT_USDM_6 				 0x1204cc
+#define PXP2_REG_PGL_INT_USDM_7 				 0x1204d0
+/* [RW 32] Inbound interrupt table for XSDM: bits[31:16]-mask;
+   bits[15:0]-address */
+#define PXP2_REG_PGL_INT_XSDM_0 				 0x1204d4
+#define PXP2_REG_PGL_INT_XSDM_1 				 0x1204d8
+#define PXP2_REG_PGL_INT_XSDM_2 				 0x1204dc
+#define PXP2_REG_PGL_INT_XSDM_3 				 0x1204e0
+#define PXP2_REG_PGL_INT_XSDM_4 				 0x1204e4
+#define PXP2_REG_PGL_INT_XSDM_5 				 0x1204e8
+#define PXP2_REG_PGL_INT_XSDM_6 				 0x1204ec
+#define PXP2_REG_PGL_INT_XSDM_7 				 0x1204f0
+/* [RW 3] This field allows one function to pretend to be another function
+   when accessing any BAR mapped resource within the device; the value of
+   the field is the number of the function that will effectively be
+   accessed. After software writes this field it must read it back in
+   order to know that the new value has taken effect. */
+#define PXP2_REG_PGL_PRETEND_FUNC_F0				 0x120674
+#define PXP2_REG_PGL_PRETEND_FUNC_F1				 0x120678
+#define PXP2_REG_PGL_PRETEND_FUNC_F2				 0x12067c
+#define PXP2_REG_PGL_PRETEND_FUNC_F3				 0x120680
+#define PXP2_REG_PGL_PRETEND_FUNC_F4				 0x120684
+#define PXP2_REG_PGL_PRETEND_FUNC_F5				 0x120688
+#define PXP2_REG_PGL_PRETEND_FUNC_F6				 0x12068c
+#define PXP2_REG_PGL_PRETEND_FUNC_F7				 0x120690
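+/* Illustrative sketch (not part of the original patch): per the comment
+ * above, a pretend write must be followed by a read-back before the new
+ * mapping is guaranteed to be in effect. Assumes the driver's REG_WR/
+ * REG_RD register accessors and a 'struct bnx2x *bp' device handle;
+ * 'pretend_reg' is one of the PGL_PRETEND_FUNC_Fx registers above. */
+static inline void pglue_pretend_func(struct bnx2x *bp, u32 pretend_reg,
+				      u32 func)
+{
+	REG_WR(bp, pretend_reg, func & 0x7);	/* [RW 3] field */
+	REG_RD(bp, pretend_reg);		/* read back to latch */
+}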
+/* [R 1] This bit indicates that a read request was blocked because
+   bus_master_en was deasserted */
+#define PXP2_REG_PGL_READ_BLOCKED				 0x120568
+#define PXP2_REG_PGL_TAGS_LIMIT 				 0x1205a8
+/* [R 18] debug only */
+#define PXP2_REG_PGL_TXW_CDTS					 0x12052c
+/* [R 1] This bit indicates that a write request was blocked because
+   bus_master_en was deasserted */
+#define PXP2_REG_PGL_WRITE_BLOCKED				 0x120564
+#define PXP2_REG_PSWRQ_BW_ADD1					 0x1201c0
+#define PXP2_REG_PSWRQ_BW_ADD10 				 0x1201e4
+#define PXP2_REG_PSWRQ_BW_ADD11 				 0x1201e8
+#define PXP2_REG_PSWRQ_BW_ADD2					 0x1201c4
+#define PXP2_REG_PSWRQ_BW_ADD28 				 0x120228
+#define PXP2_REG_PSWRQ_BW_ADD3					 0x1201c8
+#define PXP2_REG_PSWRQ_BW_ADD6					 0x1201d4
+#define PXP2_REG_PSWRQ_BW_ADD7					 0x1201d8
+#define PXP2_REG_PSWRQ_BW_ADD8					 0x1201dc
+#define PXP2_REG_PSWRQ_BW_ADD9					 0x1201e0
+#define PXP2_REG_PSWRQ_BW_CREDIT				 0x12032c
+#define PXP2_REG_PSWRQ_BW_L1					 0x1202b0
+#define PXP2_REG_PSWRQ_BW_L10					 0x1202d4
+#define PXP2_REG_PSWRQ_BW_L11					 0x1202d8
+#define PXP2_REG_PSWRQ_BW_L2					 0x1202b4
+#define PXP2_REG_PSWRQ_BW_L28					 0x120318
+#define PXP2_REG_PSWRQ_BW_L3					 0x1202b8
+#define PXP2_REG_PSWRQ_BW_L6					 0x1202c4
+#define PXP2_REG_PSWRQ_BW_L7					 0x1202c8
+#define PXP2_REG_PSWRQ_BW_L8					 0x1202cc
+#define PXP2_REG_PSWRQ_BW_L9					 0x1202d0
+#define PXP2_REG_PSWRQ_BW_RD					 0x120324
+#define PXP2_REG_PSWRQ_BW_UB1					 0x120238
+#define PXP2_REG_PSWRQ_BW_UB10					 0x12025c
+#define PXP2_REG_PSWRQ_BW_UB11					 0x120260
+#define PXP2_REG_PSWRQ_BW_UB2					 0x12023c
+#define PXP2_REG_PSWRQ_BW_UB28					 0x1202a0
+#define PXP2_REG_PSWRQ_BW_UB3					 0x120240
+#define PXP2_REG_PSWRQ_BW_UB6					 0x12024c
+#define PXP2_REG_PSWRQ_BW_UB7					 0x120250
+#define PXP2_REG_PSWRQ_BW_UB8					 0x120254
+#define PXP2_REG_PSWRQ_BW_UB9					 0x120258
+#define PXP2_REG_PSWRQ_BW_WR					 0x120328
+#define PXP2_REG_PSWRQ_CDU0_L2P 				 0x120000
+#define PXP2_REG_PSWRQ_QM0_L2P					 0x120038
+#define PXP2_REG_PSWRQ_SRC0_L2P 				 0x120054
+#define PXP2_REG_PSWRQ_TM0_L2P					 0x12001c
+#define PXP2_REG_PSWRQ_TSDM0_L2P				 0x1200e0
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP2_REG_PXP2_INT_MASK_0				 0x120578
+/* [R 32] Interrupt register #0 read */
+#define PXP2_REG_PXP2_INT_STS_0 				 0x12056c
+#define PXP2_REG_PXP2_INT_STS_1 				 0x120608
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP2_REG_PXP2_INT_STS_CLR_0				 0x120570
+/* [RW 32] Parity mask register #0 read/write */
+#define PXP2_REG_PXP2_PRTY_MASK_0				 0x120588
+#define PXP2_REG_PXP2_PRTY_MASK_1				 0x120598
+/* [R 32] Parity register #0 read */
+#define PXP2_REG_PXP2_PRTY_STS_0				 0x12057c
+#define PXP2_REG_PXP2_PRTY_STS_1				 0x12058c
+/* [RC 32] Parity register #0 read clear */
+#define PXP2_REG_PXP2_PRTY_STS_CLR_0				 0x120580
+#define PXP2_REG_PXP2_PRTY_STS_CLR_1				 0x120590
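+/* Illustrative sketch (not part of the original patch): the _STS
+ * registers above are plain reads while the _STS_CLR variants are
+ * read-clear (RC), so sampling the latched parity events and clearing
+ * them is a single read of the _CLR register. Assumes the driver's
+ * REG_RD accessor and a 'struct bnx2x *bp' device handle. */
+static inline u32 pxp2_sample_and_clear_parity0(struct bnx2x *bp)
+{
+	return REG_RD(bp, PXP2_REG_PXP2_PRTY_STS_CLR_0);
+}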
+/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
+   indication about backpressure) */
+#define PXP2_REG_RD_ALMOST_FULL_0				 0x120424
+/* [R 8] Debug only: The blocks counter - number of unused block ids */
+#define PXP2_REG_RD_BLK_CNT					 0x120418
+/* [RW 8] Debug only: Total number of available blocks in Tetris Buffer.
+   Must be bigger than 6. Normally should not be changed. */
+#define PXP2_REG_RD_BLK_NUM_CFG 				 0x12040c
+/* [RW 2] CDU byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_CDURD_SWAP_MODE				 0x120404
+/* [RW 1] When '1'; inputs to the PSWRD block are ignored */
+#define PXP2_REG_RD_DISABLE_INPUTS				 0x120374
+/* [R 1] PSWRD internal memories initialization is done */
+#define PXP2_REG_RD_INIT_DONE					 0x120370
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq10 */
+#define PXP2_REG_RD_MAX_BLKS_VQ10				 0x1203a0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq11 */
+#define PXP2_REG_RD_MAX_BLKS_VQ11				 0x1203a4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq17 */
+#define PXP2_REG_RD_MAX_BLKS_VQ17				 0x1203bc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq18 */
+#define PXP2_REG_RD_MAX_BLKS_VQ18				 0x1203c0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq19 */
+#define PXP2_REG_RD_MAX_BLKS_VQ19				 0x1203c4
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq22 */
+#define PXP2_REG_RD_MAX_BLKS_VQ22				 0x1203d0
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq25 */
+#define PXP2_REG_RD_MAX_BLKS_VQ25				 0x1203dc
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq6 */
+#define PXP2_REG_RD_MAX_BLKS_VQ6				 0x120390
+/* [RW 8] The maximum number of blocks in Tetris Buffer that can be
+   allocated for vq9 */
+#define PXP2_REG_RD_MAX_BLKS_VQ9				 0x12039c
+/* [RW 2] PBF byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_PBF_SWAP_MODE				 0x1203f4
+/* [R 1] Debug only: Indication if delivery ports are idle */
+#define PXP2_REG_RD_PORT_IS_IDLE_0				 0x12041c
+#define PXP2_REG_RD_PORT_IS_IDLE_1				 0x120420
+/* [RW 2] QM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_QM_SWAP_MODE				 0x1203f8
+/* [R 7] Debug only: The SR counter - number of unused sub request ids */
+#define PXP2_REG_RD_SR_CNT					 0x120414
+/* [RW 2] SRC byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_SRC_SWAP_MODE				 0x120400
+/* [RW 7] Debug only: Total number of available PCI read sub-requests. Must
+   be bigger than 1. Normally should not be changed. */
+#define PXP2_REG_RD_SR_NUM_CFG					 0x120408
+/* [RW 1] Signals the PSWRD block to start initializing internal memories */
+#define PXP2_REG_RD_START_INIT					 0x12036c
+/* [RW 2] TM byte swapping mode configuration for master read requests */
+#define PXP2_REG_RD_TM_SWAP_MODE				 0x1203fc
+/* [RW 10] Bandwidth addition to VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD0					 0x1201bc
+/* [RW 10] Bandwidth addition to VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD12 				 0x1201ec
+/* [RW 10] Bandwidth addition to VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD13 				 0x1201f0
+/* [RW 10] Bandwidth addition to VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD14 				 0x1201f4
+/* [RW 10] Bandwidth addition to VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD15 				 0x1201f8
+/* [RW 10] Bandwidth addition to VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD16 				 0x1201fc
+/* [RW 10] Bandwidth addition to VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD17 				 0x120200
+/* [RW 10] Bandwidth addition to VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD18 				 0x120204
+/* [RW 10] Bandwidth addition to VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD19 				 0x120208
+/* [RW 10] Bandwidth addition to VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD20 				 0x12020c
+/* [RW 10] Bandwidth addition to VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD22 				 0x120210
+/* [RW 10] Bandwidth addition to VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD23 				 0x120214
+/* [RW 10] Bandwidth addition to VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD24 				 0x120218
+/* [RW 10] Bandwidth addition to VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD25 				 0x12021c
+/* [RW 10] Bandwidth addition to VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD26 				 0x120220
+/* [RW 10] Bandwidth addition to VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD27 				 0x120224
+/* [RW 10] Bandwidth addition to VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD4					 0x1201cc
+/* [RW 10] Bandwidth addition to VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_ADD5					 0x1201d0
+/* [RW 10] Bandwidth Typical L for VQ0 Read requests */
+#define PXP2_REG_RQ_BW_RD_L0					 0x1202ac
+/* [RW 10] Bandwidth Typical L for VQ12 Read requests */
+#define PXP2_REG_RQ_BW_RD_L12					 0x1202dc
+/* [RW 10] Bandwidth Typical L for VQ13 Read requests */
+#define PXP2_REG_RQ_BW_RD_L13					 0x1202e0
+/* [RW 10] Bandwidth Typical L for VQ14 Read requests */
+#define PXP2_REG_RQ_BW_RD_L14					 0x1202e4
+/* [RW 10] Bandwidth Typical L for VQ15 Read requests */
+#define PXP2_REG_RQ_BW_RD_L15					 0x1202e8
+/* [RW 10] Bandwidth Typical L for VQ16 Read requests */
+#define PXP2_REG_RQ_BW_RD_L16					 0x1202ec
+/* [RW 10] Bandwidth Typical L for VQ17 Read requests */
+#define PXP2_REG_RQ_BW_RD_L17					 0x1202f0
+/* [RW 10] Bandwidth Typical L for VQ18 Read requests */
+#define PXP2_REG_RQ_BW_RD_L18					 0x1202f4
+/* [RW 10] Bandwidth Typical L for VQ19 Read requests */
+#define PXP2_REG_RQ_BW_RD_L19					 0x1202f8
+/* [RW 10] Bandwidth Typical L for VQ20 Read requests */
+#define PXP2_REG_RQ_BW_RD_L20					 0x1202fc
+/* [RW 10] Bandwidth Typical L for VQ22 Read requests */
+#define PXP2_REG_RQ_BW_RD_L22					 0x120300
+/* [RW 10] Bandwidth Typical L for VQ23 Read requests */
+#define PXP2_REG_RQ_BW_RD_L23					 0x120304
+/* [RW 10] Bandwidth Typical L for VQ24 Read requests */
+#define PXP2_REG_RQ_BW_RD_L24					 0x120308
+/* [RW 10] Bandwidth Typical L for VQ25 Read requests */
+#define PXP2_REG_RQ_BW_RD_L25					 0x12030c
+/* [RW 10] Bandwidth Typical L for VQ26 Read requests */
+#define PXP2_REG_RQ_BW_RD_L26					 0x120310
+/* [RW 10] Bandwidth Typical L for VQ27 Read requests */
+#define PXP2_REG_RQ_BW_RD_L27					 0x120314
+/* [RW 10] Bandwidth Typical L for VQ4 Read requests */
+#define PXP2_REG_RQ_BW_RD_L4					 0x1202bc
+/* [RW 10] Bandwidth Typical L for VQ5 Read - currently not used */
+#define PXP2_REG_RQ_BW_RD_L5					 0x1202c0
+/* [RW 7] Bandwidth upper bound for VQ0 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND0				 0x120234
+/* [RW 7] Bandwidth upper bound for VQ12 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND12				 0x120264
+/* [RW 7] Bandwidth upper bound for VQ13 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND13				 0x120268
+/* [RW 7] Bandwidth upper bound for VQ14 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND14				 0x12026c
+/* [RW 7] Bandwidth upper bound for VQ15 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND15				 0x120270
+/* [RW 7] Bandwidth upper bound for VQ16 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND16				 0x120274
+/* [RW 7] Bandwidth upper bound for VQ17 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND17				 0x120278
+/* [RW 7] Bandwidth upper bound for VQ18 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND18				 0x12027c
+/* [RW 7] Bandwidth upper bound for VQ19 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND19				 0x120280
+/* [RW 7] Bandwidth upper bound for VQ20 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND20				 0x120284
+/* [RW 7] Bandwidth upper bound for VQ22 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND22				 0x120288
+/* [RW 7] Bandwidth upper bound for VQ23 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND23				 0x12028c
+/* [RW 7] Bandwidth upper bound for VQ24 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND24				 0x120290
+/* [RW 7] Bandwidth upper bound for VQ25 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND25				 0x120294
+/* [RW 7] Bandwidth upper bound for VQ26 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND26				 0x120298
+/* [RW 7] Bandwidth upper bound for VQ27 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND27				 0x12029c
+/* [RW 7] Bandwidth upper bound for VQ4 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND4				 0x120244
+/* [RW 7] Bandwidth upper bound for VQ5 read requests */
+#define PXP2_REG_RQ_BW_RD_UBOUND5				 0x120248
+/* [RW 10] Bandwidth addition to VQ29 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD29 				 0x12022c
+/* [RW 10] Bandwidth addition to VQ30 write requests */
+#define PXP2_REG_RQ_BW_WR_ADD30 				 0x120230
+/* [RW 10] Bandwidth Typical L for VQ29 Write requests */
+#define PXP2_REG_RQ_BW_WR_L29					 0x12031c
+/* [RW 10] Bandwidth Typical L for VQ30 Write requests */
+#define PXP2_REG_RQ_BW_WR_L30					 0x120320
+/* [RW 7] Bandwidth upper bound for VQ29 */
+#define PXP2_REG_RQ_BW_WR_UBOUND29				 0x1202a4
+/* [RW 7] Bandwidth upper bound for VQ30 */
+#define PXP2_REG_RQ_BW_WR_UBOUND30				 0x1202a8
+/* [RW 18] external first_mem_addr field in L2P table for CDU module port 0 */
+#define PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR			 0x120008
+/* [RW 2] Endian mode for cdu */
+#define PXP2_REG_RQ_CDU_ENDIAN_M				 0x1201a0
+#define PXP2_REG_RQ_CDU_FIRST_ILT				 0x12061c
+#define PXP2_REG_RQ_CDU_LAST_ILT				 0x120620
+/* [RW 3] page size in L2P table for CDU module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_CDU_P_SIZE					 0x120018
+/* [R 1] '1' indicates that the requester has finished its internal
+   configuration */
+#define PXP2_REG_RQ_CFG_DONE					 0x1201b4
+/* [RW 2] Endian mode for debug */
+#define PXP2_REG_RQ_DBG_ENDIAN_M				 0x1201a4
+/* [RW 1] When '1'; requests will enter input buffers but won't get out
+   towards the glue */
+#define PXP2_REG_RQ_DISABLE_INPUTS				 0x120330
+/* [RW 4] Determines alignment of write SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN					 0x1205b0
+/* [RW 4] Determines alignment of read SRs when a request is split into
+ * several SRs. 0 - 8B aligned. 1 - 64B aligned. 2 - 128B aligned. 3 - 256B
+ * aligned. 4 - 512B aligned. */
+#define PXP2_REG_RQ_DRAM_ALIGN_RD				 0x12092c
+/* [RW 1] when set the new alignment method (E2) will be applied; when
+ * reset the original alignment method (E1/E1H) will be applied */
+#define PXP2_REG_RQ_DRAM_ALIGN_SEL				 0x120930
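+/* Illustrative helper (not part of the original patch): decode the 4-bit
+ * SR alignment encoding documented above (0 - 8B, 1 - 64B, 2 - 128B,
+ * 3 - 256B, 4 - 512B). Assumes u32 from <linux/types.h>. */
+static inline u32 pxp2_rq_dram_align_bytes(u32 enc)
+{
+	return enc ? (32U << enc) : 8U;	/* 1->64B ... 4->512B */
+}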
+/* [RW 1] If 1 an ILT failure will not result in ELT access; an interrupt
+   will be asserted */
+#define PXP2_REG_RQ_ELT_DISABLE 				 0x12066c
+/* [RW 2] Endian mode for hc */
+#define PXP2_REG_RQ_HC_ENDIAN_M 				 0x1201a8
+/* [RW 1] when '0' ILT logic will work as in A0; otherwise B0; for
+   backward compatibility; Note that different registers are used per mode */
+#define PXP2_REG_RQ_ILT_MODE					 0x1205b4
+/* [WB 53] Onchip address table */
+#define PXP2_REG_RQ_ONCHIP_AT					 0x122000
+/* [WB 53] Onchip address table - B0 */
+#define PXP2_REG_RQ_ONCHIP_AT_B0				 0x128000
+/* [RW 13] Pending read limiter threshold; in Dwords */
+#define PXP2_REG_RQ_PDR_LIMIT					 0x12033c
+/* [RW 2] Endian mode for qm */
+#define PXP2_REG_RQ_QM_ENDIAN_M 				 0x120194
+#define PXP2_REG_RQ_QM_FIRST_ILT				 0x120634
+#define PXP2_REG_RQ_QM_LAST_ILT 				 0x120638
+/* [RW 3] page size in L2P table for QM module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_QM_P_SIZE					 0x120050
+/* [RW 1] '1' indicates that the RBC has finished configuring the PSWRQ */
+#define PXP2_REG_RQ_RBC_DONE					 0x1201b0
+/* [RW 3] Max burst size field for read requests port 0; 000 - 128B;
+   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS0					 0x120160
+/* [RW 3] Max burst size field for read requests port 1; 000 - 128B;
+   001 - 256B; 010 - 512B; 011 - 1K; 100 - 2K; 101 - 4K */
+#define PXP2_REG_RQ_RD_MBS1					 0x120168
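+/* Illustrative helper (not part of the original patch): decode the 3-bit
+ * max-burst-size encoding documented above (000 - 128B, doubling up to
+ * 101 - 4K). Assumes u32 from <linux/types.h>. */
+static inline u32 pxp2_rq_mbs_bytes(u32 enc)
+{
+	return 128U << enc;	/* 0->128B, 1->256B, ..., 5->4K */
+}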
+/* [RW 2] Endian mode for src */
+#define PXP2_REG_RQ_SRC_ENDIAN_M				 0x12019c
+#define PXP2_REG_RQ_SRC_FIRST_ILT				 0x12063c
+#define PXP2_REG_RQ_SRC_LAST_ILT				 0x120640
+/* [RW 3] page size in L2P table for SRC module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_SRC_P_SIZE					 0x12006c
+/* [RW 2] Endian mode for tm */
+#define PXP2_REG_RQ_TM_ENDIAN_M 				 0x120198
+#define PXP2_REG_RQ_TM_FIRST_ILT				 0x120644
+#define PXP2_REG_RQ_TM_LAST_ILT 				 0x120648
+/* [RW 3] page size in L2P table for TM module; 0-4k; 1-8k; 2-16k; 3-32k;
+   4-64k; 5-128k */
+#define PXP2_REG_RQ_TM_P_SIZE					 0x120034
+/* [R 5] Number of entries in the ufifo; this fifo holds l2p completions */
+#define PXP2_REG_RQ_UFIFO_NUM_OF_ENTRY				 0x12080c
+/* [RW 18] external first_mem_addr field in L2P table for USDM module port 0 */
+#define PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR			 0x120094
+/* [R 8] Number of entries occupied by vq 0 in pswrq memory */
+#define PXP2_REG_RQ_VQ0_ENTRY_CNT				 0x120810
+/* [R 8] Number of entries occupied by vq 10 in pswrq memory */
+#define PXP2_REG_RQ_VQ10_ENTRY_CNT				 0x120818
+/* [R 8] Number of entries occupied by vq 11 in pswrq memory */
+#define PXP2_REG_RQ_VQ11_ENTRY_CNT				 0x120820
+/* [R 8] Number of entries occupied by vq 12 in pswrq memory */
+#define PXP2_REG_RQ_VQ12_ENTRY_CNT				 0x120828
+/* [R 8] Number of entries occupied by vq 13 in pswrq memory */
+#define PXP2_REG_RQ_VQ13_ENTRY_CNT				 0x120830
+/* [R 8] Number of entries occupied by vq 14 in pswrq memory */
+#define PXP2_REG_RQ_VQ14_ENTRY_CNT				 0x120838
+/* [R 8] Number of entries occupied by vq 15 in pswrq memory */
+#define PXP2_REG_RQ_VQ15_ENTRY_CNT				 0x120840
+/* [R 8] Number of entries occupied by vq 16 in pswrq memory */
+#define PXP2_REG_RQ_VQ16_ENTRY_CNT				 0x120848
+/* [R 8] Number of entries occupied by vq 17 in pswrq memory */
+#define PXP2_REG_RQ_VQ17_ENTRY_CNT				 0x120850
+/* [R 8] Number of entries occupied by vq 18 in pswrq memory */
+#define PXP2_REG_RQ_VQ18_ENTRY_CNT				 0x120858
+/* [R 8] Number of entries occupied by vq 19 in pswrq memory */
+#define PXP2_REG_RQ_VQ19_ENTRY_CNT				 0x120860
+/* [R 8] Number of entries occupied by vq 1 in pswrq memory */
+#define PXP2_REG_RQ_VQ1_ENTRY_CNT				 0x120868
+/* [R 8] Number of entries occupied by vq 20 in pswrq memory */
+#define PXP2_REG_RQ_VQ20_ENTRY_CNT				 0x120870
+/* [R 8] Number of entries occupied by vq 21 in pswrq memory */
+#define PXP2_REG_RQ_VQ21_ENTRY_CNT				 0x120878
+/* [R 8] Number of entries occupied by vq 22 in pswrq memory */
+#define PXP2_REG_RQ_VQ22_ENTRY_CNT				 0x120880
+/* [R 8] Number of entries occupied by vq 23 in pswrq memory */
+#define PXP2_REG_RQ_VQ23_ENTRY_CNT				 0x120888
+/* [R 8] Number of entries occupied by vq 24 in pswrq memory */
+#define PXP2_REG_RQ_VQ24_ENTRY_CNT				 0x120890
+/* [R 8] Number of entries occupied by vq 25 in pswrq memory */
+#define PXP2_REG_RQ_VQ25_ENTRY_CNT				 0x120898
+/* [R 8] Number of entries occupied by vq 26 in pswrq memory */
+#define PXP2_REG_RQ_VQ26_ENTRY_CNT				 0x1208a0
+/* [R 8] Number of entries occupied by vq 27 in pswrq memory */
+#define PXP2_REG_RQ_VQ27_ENTRY_CNT				 0x1208a8
+/* [R 8] Number of entries occupied by vq 28 in pswrq memory */
+#define PXP2_REG_RQ_VQ28_ENTRY_CNT				 0x1208b0
+/* [R 8] Number of entries occupied by vq 29 in pswrq memory */
+#define PXP2_REG_RQ_VQ29_ENTRY_CNT				 0x1208b8
+/* [R 8] Number of entries occupied by vq 2 in pswrq memory */
+#define PXP2_REG_RQ_VQ2_ENTRY_CNT				 0x1208c0
+/* [R 8] Number of entries occupied by vq 30 in pswrq memory */
+#define PXP2_REG_RQ_VQ30_ENTRY_CNT				 0x1208c8
+/* [R 8] Number of entries occupied by vq 31 in pswrq memory */
+#define PXP2_REG_RQ_VQ31_ENTRY_CNT				 0x1208d0
+/* [R 8] Number of entries occupied by vq 3 in pswrq memory */
+#define PXP2_REG_RQ_VQ3_ENTRY_CNT				 0x1208d8
+/* [R 8] Number of entries occupied by vq 4 in pswrq memory */
+#define PXP2_REG_RQ_VQ4_ENTRY_CNT				 0x1208e0
+/* [R 8] Number of entries occupied by vq 5 in pswrq memory */
+#define PXP2_REG_RQ_VQ5_ENTRY_CNT				 0x1208e8
+/* [R 8] Number of entries occupied by vq 6 in pswrq memory */
+#define PXP2_REG_RQ_VQ6_ENTRY_CNT				 0x1208f0
+/* [R 8] Number of entries occupied by vq 7 in pswrq memory */
+#define PXP2_REG_RQ_VQ7_ENTRY_CNT				 0x1208f8
+/* [R 8] Number of entries occupied by vq 8 in pswrq memory */
+#define PXP2_REG_RQ_VQ8_ENTRY_CNT				 0x120900
+/* [R 8] Number of entries occupied by vq 9 in pswrq memory */
+#define PXP2_REG_RQ_VQ9_ENTRY_CNT				 0x120908
+/* [RW 3] Max burst size field for write requests port 0; 000 - 128B;
+   001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS0					 0x12015c
+/* [RW 3] Max burst size field for write requests port 1; 000 - 128B;
+   001 - 256B; 010 - 512B */
+#define PXP2_REG_RQ_WR_MBS1					 0x120164
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_CDU_MPS					 0x1205f0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_CSDM_MPS					 0x1205d0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_DBG_MPS					 0x1205e8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_DMAE_MPS					 0x1205ec
+/* [RW 10] if the number of entries in the dmae fifo is higher than this
+   threshold then the has_payload indication will be asserted; the default
+   value should be greater than the write MBS size */
+#define PXP2_REG_WR_DMAE_TH					 0x120368
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_HC_MPS					 0x1205c8
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_QM_MPS					 0x1205dc
+/* [RW 1] 0 - working in A0 mode; 1 - working in B0 mode */
+#define PXP2_REG_WR_REV_MODE					 0x120670
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_SRC_MPS					 0x1205e4
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_TM_MPS					 0x1205e0
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_TSDM_MPS					 0x1205d4
+/* [RW 10] if the number of entries in the usdmdp fifo is higher than this
+   threshold then the has_payload indication will be asserted; the default
+   value should be greater than the write MBS size */
+#define PXP2_REG_WR_USDMDP_TH					 0x120348
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_USDM_MPS					 0x1205cc
+/* [RW 2] 0 - 128B; 1 - 256B; 2 - 512B; 3 - 1024B; when the payload in the
+   buffer reaches this size has_payload will be asserted */
+#define PXP2_REG_WR_XSDM_MPS					 0x1205d8
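+/* Illustrative helper (not part of the original patch): decode the 2-bit
+ * MPS encoding shared by the WR_*_MPS registers above (0 - 128B, 1 - 256B,
+ * 2 - 512B, 3 - 1024B). Assumes u32 from <linux/types.h>. */
+static inline u32 pxp2_wr_mps_bytes(u32 enc)
+{
+	return 128U << enc;
+}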
+/* [R 1] debug only: Indication if PSWHST arbiter is idle */
+#define PXP_REG_HST_ARB_IS_IDLE 				 0x103004
+/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
+   this client is waiting for the arbiter. */
+#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB			 0x103008
+/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
+   block. Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_DOORBELLS				 0x1030a4
+/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This
+   bit should track the 'hst_discard_doorbells' register when the state
+   machine is idle */
+#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS			 0x1030a0
+/* [RW 1] When 1; new internal writes arriving to the block are discarded.
+   Should be used to close the gates. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES			 0x1030a8
+/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
+   means this PSWHST is discarding inputs from this client. Each bit
+   should track the 'hst_discard_internal_writes' register when the state
+   machine is idle. */
+#define PXP_REG_HST_DISCARD_INTERNAL_WRITES_STATUS		 0x10309c
+/* [WB 160] Used for initialization of the inbound interrupts memory */
+#define PXP_REG_HST_INBOUND_INT 				 0x103800
+/* [RW 32] Interrupt mask register #0 read/write */
+#define PXP_REG_PXP_INT_MASK_0					 0x103074
+#define PXP_REG_PXP_INT_MASK_1					 0x103084
+/* [R 32] Interrupt register #0 read */
+#define PXP_REG_PXP_INT_STS_0					 0x103068
+#define PXP_REG_PXP_INT_STS_1					 0x103078
+/* [RC 32] Interrupt register #0 read clear */
+#define PXP_REG_PXP_INT_STS_CLR_0				 0x10306c
+#define PXP_REG_PXP_INT_STS_CLR_1				 0x10307c
+/* [RW 27] Parity mask register #0 read/write */
+#define PXP_REG_PXP_PRTY_MASK					 0x103094
+/* [R 26] Parity register #0 read */
+#define PXP_REG_PXP_PRTY_STS					 0x103088
+/* [RC 27] Parity register #0 read clear */
+#define PXP_REG_PXP_PRTY_STS_CLR				 0x10308c
+/* [RW 4] The activity counter initial increment value sent in the load
+   request */
+#define QM_REG_ACTCTRINITVAL_0					 0x168040
+#define QM_REG_ACTCTRINITVAL_1					 0x168044
+#define QM_REG_ACTCTRINITVAL_2					 0x168048
+#define QM_REG_ACTCTRINITVAL_3					 0x16804c
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+   index I represents the physical queue number. The 12 lsbs are ignored
+   and considered zero so practically there are only 20 bits in this
+   register; queues 63-0 */
+#define QM_REG_BASEADDR 					 0x168900
+/* [RW 32] The base logical address (in bytes) of each physical queue. The
+   index I represents the physical queue number. The 12 lsbs are ignored
+   and considered zero so practically there are only 20 bits in this
+   register; queues 127-64 */
+#define QM_REG_BASEADDR_EXT_A					 0x16e100
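+/* Illustrative sketch (not part of the original patch): since the 12 lsbs
+ * of each entry are ignored, base addresses are effectively 4KB aligned.
+ * Writes one entry of the queues 63-0 table, assuming 32-bit entries
+ * spaced 4 bytes apart, the driver's REG_WR accessor, a 'struct bnx2x
+ * *bp' device handle, and a queue index 'q' below 64. */
+static inline void qm_set_base_addr(struct bnx2x *bp, u32 q, u32 base)
+{
+	REG_WR(bp, QM_REG_BASEADDR + q * 4, base & ~0xfffU);
+}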
+/* [RW 16] The byte credit cost for each task. This value is for both ports */
+#define QM_REG_BYTECRDCOST					 0x168234
+/* [RW 16] The initial byte credit value for both ports. */
+#define QM_REG_BYTECRDINITVAL					 0x168238
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 31-0 */
+#define QM_REG_BYTECRDPORT_LSB					 0x168228
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 95-64 */
+#define QM_REG_BYTECRDPORT_LSB_EXT_A				 0x16e520
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 63-32 */
+#define QM_REG_BYTECRDPORT_MSB					 0x168224
+/* [RW 32] A bit per physical queue. If the bit is cleared then the physical
+   queue uses port 0 else it uses port 1; queues 127-96 */
+#define QM_REG_BYTECRDPORT_MSB_EXT_A				 0x16e51c
+/* [RW 16] The byte credit value above which the QM is considered almost
+   full */
+#define QM_REG_BYTECREDITAFULLTHR				 0x168094
+/* [RW 4] The initial credit for each interface */
+#define QM_REG_CMINITCRD_0					 0x1680cc
+#define QM_REG_BYTECRDCMDQ_0					 0x16e6e8
+#define QM_REG_CMINITCRD_1					 0x1680d0
+#define QM_REG_CMINITCRD_2					 0x1680d4
+#define QM_REG_CMINITCRD_3					 0x1680d8
+#define QM_REG_CMINITCRD_4					 0x1680dc
+#define QM_REG_CMINITCRD_5					 0x1680e0
+#define QM_REG_CMINITCRD_6					 0x1680e4
+#define QM_REG_CMINITCRD_7					 0x1680e8
+/* [RW 8] A mask bit per CM interface. If this bit is 0 then this interface
+   is masked */
+#define QM_REG_CMINTEN						 0x1680ec
+/* [RW 12] A bit vector which indicates which of the queues are tied to
+   interface 0 */
+#define QM_REG_CMINTVOQMASK_0					 0x1681f4
+#define QM_REG_CMINTVOQMASK_1					 0x1681f8
+#define QM_REG_CMINTVOQMASK_2					 0x1681fc
+#define QM_REG_CMINTVOQMASK_3					 0x168200
+#define QM_REG_CMINTVOQMASK_4					 0x168204
+#define QM_REG_CMINTVOQMASK_5					 0x168208
+#define QM_REG_CMINTVOQMASK_6					 0x16820c
+#define QM_REG_CMINTVOQMASK_7					 0x168210
+/* [RW 20] The number of connections divided by 16, which dictates the
+   size of each queue that belongs to an even function number. */
+#define QM_REG_CONNNUM_0					 0x168020
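+/* Illustrative helper (not part of the original patch): the register
+ * above holds the connection count divided by 16, so the value to program
+ * for a given number of connections is a simple shift. Assumes u32 from
+ * <linux/types.h> and that 'conns' is a multiple of 16. */
+static inline u32 qm_connnum_val(u32 conns)
+{
+	return conns >> 4;	/* conns / 16 */
+}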
+/* [R 6] Keeps the fill level of the fifo from write client 4 */
+#define QM_REG_CQM_WRC_FIFOLVL					 0x168018
+/* [RW 8] The context regions sent in the CFC load request */
+#define QM_REG_CTXREG_0 					 0x168030
+#define QM_REG_CTXREG_1 					 0x168034
+#define QM_REG_CTXREG_2 					 0x168038
+#define QM_REG_CTXREG_3 					 0x16803c
+/* [RW 12] The VOQ mask used to select the VOQs which need to be full for
+   bypass enable */
+#define QM_REG_ENBYPVOQMASK					 0x16823c
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 31-0 */
+#define QM_REG_ENBYTECRD_LSB					 0x168220
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 95-64 */
+#define QM_REG_ENBYTECRD_LSB_EXT_A				 0x16e518
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 63-32 */
+#define QM_REG_ENBYTECRD_MSB					 0x16821c
+/* [RW 32] A bit mask per physical queue. If a bit is set then the
+   physical queue uses the byte credit; queues 127-96 */
+#define QM_REG_ENBYTECRD_MSB_EXT_A				 0x16e514
+/* [RW 4] If cleared then the secondary interface will not be served by the
+   RR arbiter */
+#define QM_REG_ENSEC						 0x1680f0
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_LSB					 0x168230
+/* [RW 32] NA */
+#define QM_REG_FUNCNUMSEL_MSB					 0x16822c
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 31:0 */
+#define QM_REG_HWAEMPTYMASK_LSB 				 0x168218
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 95-64 */
+#define QM_REG_HWAEMPTYMASK_LSB_EXT_A				 0x16e510
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 63:32 */
+#define QM_REG_HWAEMPTYMASK_MSB 				 0x168214
+/* [RW 32] A mask register to mask the Almost empty signals which will not
+   be used for the almost empty indication to the HW block; queues 127-96 */
+#define QM_REG_HWAEMPTYMASK_MSB_EXT_A				 0x16e50c
+/* [RW 4] The number of outstanding requests to CFC */
+#define QM_REG_OUTLDREQ 					 0x168804
+/* [RC 1] A flag to indicate that an overflow error occurred in one of the
+   queues. */
+#define QM_REG_OVFERROR 					 0x16805c
+/* [RC 7] The queue where the overflow occurred */
+#define QM_REG_OVFQNUM						 0x168058
+/* [R 16] Pause state for physical queues 15-0 */
+#define QM_REG_PAUSESTATE0					 0x168410
+/* [R 16] Pause state for physical queues 31-16 */
+#define QM_REG_PAUSESTATE1					 0x168414
+/* [R 16] Pause state for physical queues 47-32 */
+#define QM_REG_PAUSESTATE2					 0x16e684
+/* [R 16] Pause state for physical queues 63-48 */
+#define QM_REG_PAUSESTATE3					 0x16e688
+/* [R 16] Pause state for physical queues 79-64 */
+#define QM_REG_PAUSESTATE4					 0x16e68c
+/* [R 16] Pause state for physical queues 95-80 */
+#define QM_REG_PAUSESTATE5					 0x16e690
+/* [R 16] Pause state for physical queues 111-96 */
+#define QM_REG_PAUSESTATE6					 0x16e694
+/* [R 16] Pause state for physical queues 127-112 */
+#define QM_REG_PAUSESTATE7					 0x16e698
+/* [RW 2] The PCI attributes field used in the PCI request. */
+#define QM_REG_PCIREQAT 					 0x168054
+#define QM_REG_PF_EN						 0x16e70c
+/* [R 24] The number of tasks stored in the QM for the PF. Only even
+ * functions are valid in E2 (odd _i registers will be hard-wired to 0) */
+#define QM_REG_PF_USG_CNT_0					 0x16e040
+/* [R 16] NOT USED */
+#define QM_REG_PORT0BYTECRD					 0x168300
+/* [R 16] The byte credit of port 1 */
+#define QM_REG_PORT1BYTECRD					 0x168304
+/* [RW 3] pci function number of queues 15-0 */
+#define QM_REG_PQ2PCIFUNC_0					 0x16e6bc
+#define QM_REG_PQ2PCIFUNC_1					 0x16e6c0
+#define QM_REG_PQ2PCIFUNC_2					 0x16e6c4
+#define QM_REG_PQ2PCIFUNC_3					 0x16e6c8
+#define QM_REG_PQ2PCIFUNC_4					 0x16e6cc
+#define QM_REG_PQ2PCIFUNC_5					 0x16e6d0
+#define QM_REG_PQ2PCIFUNC_6					 0x16e6d4
+#define QM_REG_PQ2PCIFUNC_7					 0x16e6d8
+/* [WB 54] Pointer Table Memory for queues 63-0; The mapping is as
+   follows: ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer;
+   ptrtbl[5:4] read bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write
+   bank; */
+#define QM_REG_PTRTBL						 0x168a00
+/* [WB 54] Pointer Table Memory for queues 127-64; The mapping is as
+   follows: ptrtbl[53:30] read pointer; ptrtbl[29:6] write pointer;
+   ptrtbl[5:4] read bank0; ptrtbl[3:2] read bank 1; ptrtbl[1:0] write
+   bank; */
+#define QM_REG_PTRTBL_EXT_A					 0x16e200
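+/* Illustrative helpers (not part of the original patch): field extraction
+ * for one 54-bit pointer-table entry using the layout documented above.
+ * Assumes u64 from <linux/types.h>; 'e' is the raw wide-bus entry. */
+static inline u64 qm_ptrtbl_rd_ptr(u64 e)	{ return (e >> 30) & 0xffffff; }
+static inline u64 qm_ptrtbl_wr_ptr(u64 e)	{ return (e >> 6) & 0xffffff; }
+static inline u64 qm_ptrtbl_rd_bank0(u64 e)	{ return (e >> 4) & 0x3; }
+static inline u64 qm_ptrtbl_rd_bank1(u64 e)	{ return (e >> 2) & 0x3; }
+static inline u64 qm_ptrtbl_wr_bank(u64 e)	{ return e & 0x3; }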
+/* [RW 2] Interrupt mask register #0 read/write */
+#define QM_REG_QM_INT_MASK					 0x168444
+/* [R 2] Interrupt register #0 read */
+#define QM_REG_QM_INT_STS					 0x168438
+/* [RW 12] Parity mask register #0 read/write */
+#define QM_REG_QM_PRTY_MASK					 0x168454
+/* [R 12] Parity register #0 read */
+#define QM_REG_QM_PRTY_STS					 0x168448
+/* [RC 12] Parity register #0 read clear */
+#define QM_REG_QM_PRTY_STS_CLR					 0x16844c
+/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
+#define QM_REG_QSTATUS_HIGH					 0x16802c
+/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
+#define QM_REG_QSTATUS_HIGH_EXT_A				 0x16e408
+/* [R 32] Current queues in pipeline: Queues from 0 to 31 */
+#define QM_REG_QSTATUS_LOW					 0x168028
+/* [R 32] Current queues in pipeline: Queues from 64 to 95 */
+#define QM_REG_QSTATUS_LOW_EXT_A				 0x16e404
+/* [R 24] The number of tasks queued for each queue; queues 63-0 */
+#define QM_REG_QTASKCTR_0					 0x168308
+/* [R 24] The number of tasks queued for each queue; queues 127-64 */
+#define QM_REG_QTASKCTR_EXT_A_0 				 0x16e584
+/* [RW 4] Queue tied to VOQ */
+#define QM_REG_QVOQIDX_0					 0x1680f4
+#define QM_REG_QVOQIDX_10					 0x16811c
+#define QM_REG_QVOQIDX_100					 0x16e49c
+#define QM_REG_QVOQIDX_101					 0x16e4a0
+#define QM_REG_QVOQIDX_102					 0x16e4a4
+#define QM_REG_QVOQIDX_103					 0x16e4a8
+#define QM_REG_QVOQIDX_104					 0x16e4ac
+#define QM_REG_QVOQIDX_105					 0x16e4b0
+#define QM_REG_QVOQIDX_106					 0x16e4b4
+#define QM_REG_QVOQIDX_107					 0x16e4b8
+#define QM_REG_QVOQIDX_108					 0x16e4bc
+#define QM_REG_QVOQIDX_109					 0x16e4c0
+#define QM_REG_QVOQIDX_11					 0x168120
+#define QM_REG_QVOQIDX_110					 0x16e4c4
+#define QM_REG_QVOQIDX_111					 0x16e4c8
+#define QM_REG_QVOQIDX_112					 0x16e4cc
+#define QM_REG_QVOQIDX_113					 0x16e4d0
+#define QM_REG_QVOQIDX_114					 0x16e4d4
+#define QM_REG_QVOQIDX_115					 0x16e4d8
+#define QM_REG_QVOQIDX_116					 0x16e4dc
+#define QM_REG_QVOQIDX_117					 0x16e4e0
+#define QM_REG_QVOQIDX_118					 0x16e4e4
+#define QM_REG_QVOQIDX_119					 0x16e4e8
+#define QM_REG_QVOQIDX_12					 0x168124
+#define QM_REG_QVOQIDX_120					 0x16e4ec
+#define QM_REG_QVOQIDX_121					 0x16e4f0
+#define QM_REG_QVOQIDX_122					 0x16e4f4
+#define QM_REG_QVOQIDX_123					 0x16e4f8
+#define QM_REG_QVOQIDX_124					 0x16e4fc
+#define QM_REG_QVOQIDX_125					 0x16e500
+#define QM_REG_QVOQIDX_126					 0x16e504
+#define QM_REG_QVOQIDX_127					 0x16e508
+#define QM_REG_QVOQIDX_13					 0x168128
+#define QM_REG_QVOQIDX_14					 0x16812c
+#define QM_REG_QVOQIDX_15					 0x168130
+#define QM_REG_QVOQIDX_16					 0x168134
+#define QM_REG_QVOQIDX_17					 0x168138
+#define QM_REG_QVOQIDX_21					 0x168148
+#define QM_REG_QVOQIDX_22					 0x16814c
+#define QM_REG_QVOQIDX_23					 0x168150
+#define QM_REG_QVOQIDX_24					 0x168154
+#define QM_REG_QVOQIDX_25					 0x168158
+#define QM_REG_QVOQIDX_26					 0x16815c
+#define QM_REG_QVOQIDX_27					 0x168160
+#define QM_REG_QVOQIDX_28					 0x168164
+#define QM_REG_QVOQIDX_29					 0x168168
+#define QM_REG_QVOQIDX_30					 0x16816c
+#define QM_REG_QVOQIDX_31					 0x168170
+#define QM_REG_QVOQIDX_32					 0x168174
+#define QM_REG_QVOQIDX_33					 0x168178
+#define QM_REG_QVOQIDX_34					 0x16817c
+#define QM_REG_QVOQIDX_35					 0x168180
+#define QM_REG_QVOQIDX_36					 0x168184
+#define QM_REG_QVOQIDX_37					 0x168188
+#define QM_REG_QVOQIDX_38					 0x16818c
+#define QM_REG_QVOQIDX_39					 0x168190
+#define QM_REG_QVOQIDX_40					 0x168194
+#define QM_REG_QVOQIDX_41					 0x168198
+#define QM_REG_QVOQIDX_42					 0x16819c
+#define QM_REG_QVOQIDX_43					 0x1681a0
+#define QM_REG_QVOQIDX_44					 0x1681a4
+#define QM_REG_QVOQIDX_45					 0x1681a8
+#define QM_REG_QVOQIDX_46					 0x1681ac
+#define QM_REG_QVOQIDX_47					 0x1681b0
+#define QM_REG_QVOQIDX_48					 0x1681b4
+#define QM_REG_QVOQIDX_49					 0x1681b8
+#define QM_REG_QVOQIDX_5					 0x168108
+#define QM_REG_QVOQIDX_50					 0x1681bc
+#define QM_REG_QVOQIDX_51					 0x1681c0
+#define QM_REG_QVOQIDX_52					 0x1681c4
+#define QM_REG_QVOQIDX_53					 0x1681c8
+#define QM_REG_QVOQIDX_54					 0x1681cc
+#define QM_REG_QVOQIDX_55					 0x1681d0
+#define QM_REG_QVOQIDX_56					 0x1681d4
+#define QM_REG_QVOQIDX_57					 0x1681d8
+#define QM_REG_QVOQIDX_58					 0x1681dc
+#define QM_REG_QVOQIDX_59					 0x1681e0
+#define QM_REG_QVOQIDX_6					 0x16810c
+#define QM_REG_QVOQIDX_60					 0x1681e4
+#define QM_REG_QVOQIDX_61					 0x1681e8
+#define QM_REG_QVOQIDX_62					 0x1681ec
+#define QM_REG_QVOQIDX_63					 0x1681f0
+#define QM_REG_QVOQIDX_64					 0x16e40c
+#define QM_REG_QVOQIDX_65					 0x16e410
+#define QM_REG_QVOQIDX_69					 0x16e420
+#define QM_REG_QVOQIDX_7					 0x168110
+#define QM_REG_QVOQIDX_70					 0x16e424
+#define QM_REG_QVOQIDX_71					 0x16e428
+#define QM_REG_QVOQIDX_72					 0x16e42c
+#define QM_REG_QVOQIDX_73					 0x16e430
+#define QM_REG_QVOQIDX_74					 0x16e434
+#define QM_REG_QVOQIDX_75					 0x16e438
+#define QM_REG_QVOQIDX_76					 0x16e43c
+#define QM_REG_QVOQIDX_77					 0x16e440
+#define QM_REG_QVOQIDX_78					 0x16e444
+#define QM_REG_QVOQIDX_79					 0x16e448
+#define QM_REG_QVOQIDX_8					 0x168114
+#define QM_REG_QVOQIDX_80					 0x16e44c
+#define QM_REG_QVOQIDX_81					 0x16e450
+#define QM_REG_QVOQIDX_85					 0x16e460
+#define QM_REG_QVOQIDX_86					 0x16e464
+#define QM_REG_QVOQIDX_87					 0x16e468
+#define QM_REG_QVOQIDX_88					 0x16e46c
+#define QM_REG_QVOQIDX_89					 0x16e470
+#define QM_REG_QVOQIDX_9					 0x168118
+#define QM_REG_QVOQIDX_90					 0x16e474
+#define QM_REG_QVOQIDX_91					 0x16e478
+#define QM_REG_QVOQIDX_92					 0x16e47c
+#define QM_REG_QVOQIDX_93					 0x16e480
+#define QM_REG_QVOQIDX_94					 0x16e484
+#define QM_REG_QVOQIDX_95					 0x16e488
+#define QM_REG_QVOQIDX_96					 0x16e48c
+#define QM_REG_QVOQIDX_97					 0x16e490
+#define QM_REG_QVOQIDX_98					 0x16e494
+#define QM_REG_QVOQIDX_99					 0x16e498
+/* [RW 1] Initialization bit command */
+#define QM_REG_SOFT_RESET					 0x168428
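+/* Illustrative sketch (not part of the original patch): a common pattern
+ * for an initialization bit like this is to pulse it - assert, then
+ * deassert. Whether the QM requires a delay in between is an assumption
+ * left to the hardware documentation. Assumes the driver's REG_WR
+ * accessor and a 'struct bnx2x *bp' device handle. */
+static inline void qm_soft_reset_pulse(struct bnx2x *bp)
+{
+	REG_WR(bp, QM_REG_SOFT_RESET, 1);
+	REG_WR(bp, QM_REG_SOFT_RESET, 0);
+}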
+/* [RW 8] The credit cost of every task in the QM. One value per VOQ */
+#define QM_REG_TASKCRDCOST_0					 0x16809c
+#define QM_REG_TASKCRDCOST_1					 0x1680a0
+#define QM_REG_TASKCRDCOST_2					 0x1680a4
+#define QM_REG_TASKCRDCOST_4					 0x1680ac
+#define QM_REG_TASKCRDCOST_5					 0x1680b0
+/* [R 6] Keeps the fill level of the fifo from write client 3 */
+#define QM_REG_TQM_WRC_FIFOLVL					 0x168010
+/* [R 6] Keeps the fill level of the fifo from write client 2 */
+#define QM_REG_UQM_WRC_FIFOLVL					 0x168008
+/* [RC 32] Credit update error register */
+#define QM_REG_VOQCRDERRREG					 0x168408
+/* [R 16] The credit value for each VOQ */
+#define QM_REG_VOQCREDIT_0					 0x1682d0
+#define QM_REG_VOQCREDIT_1					 0x1682d4
+#define QM_REG_VOQCREDIT_4					 0x1682e0
+/* [RW 16] The credit value above which the QM is considered almost full */
+#define QM_REG_VOQCREDITAFULLTHR				 0x168090
+/* [RW 16] The initial and maximum credit for each VOQ */
+#define QM_REG_VOQINITCREDIT_0					 0x168060
+#define QM_REG_VOQINITCREDIT_1					 0x168064
+#define QM_REG_VOQINITCREDIT_2					 0x168068
+#define QM_REG_VOQINITCREDIT_4					 0x168070
+#define QM_REG_VOQINITCREDIT_5					 0x168074
+/* [RW 1] The port to which the VOQ belongs */
+#define QM_REG_VOQPORT_0					 0x1682a0
+#define QM_REG_VOQPORT_1					 0x1682a4
+#define QM_REG_VOQPORT_2					 0x1682a8
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_0_LSB					 0x168240
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_0_LSB_EXT_A				 0x16e524
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_0_MSB					 0x168244
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_0_MSB_EXT_A				 0x16e528
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_10_LSB					 0x168290
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_10_LSB_EXT_A				 0x16e574
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_10_MSB					 0x168294
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_10_MSB_EXT_A				 0x16e578
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_11_LSB					 0x168298
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_11_LSB_EXT_A				 0x16e57c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_11_MSB					 0x16829c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_11_MSB_EXT_A				 0x16e580
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_1_LSB					 0x168248
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_1_LSB_EXT_A				 0x16e52c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_1_MSB					 0x16824c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_1_MSB_EXT_A				 0x16e530
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_2_LSB					 0x168250
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_2_LSB_EXT_A				 0x16e534
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_2_MSB					 0x168254
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_2_MSB_EXT_A				 0x16e538
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_3_LSB					 0x168258
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_3_LSB_EXT_A				 0x16e53c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_3_MSB_EXT_A				 0x16e540
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_4_LSB					 0x168260
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_4_LSB_EXT_A				 0x16e544
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_4_MSB					 0x168264
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_4_MSB_EXT_A				 0x16e548
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_5_LSB					 0x168268
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_5_LSB_EXT_A				 0x16e54c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_5_MSB					 0x16826c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_5_MSB_EXT_A				 0x16e550
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_6_LSB					 0x168270
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_6_LSB_EXT_A				 0x16e554
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_6_MSB					 0x168274
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_6_MSB_EXT_A				 0x16e558
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_7_LSB					 0x168278
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_7_LSB_EXT_A				 0x16e55c
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_7_MSB					 0x16827c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_7_MSB_EXT_A				 0x16e560
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_8_LSB					 0x168280
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_8_LSB_EXT_A				 0x16e564
+/* [RW 32] The physical queue number associated with each VOQ; queues 63-32 */
+#define QM_REG_VOQQMASK_8_MSB					 0x168284
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_8_MSB_EXT_A				 0x16e568
+/* [RW 32] The physical queue number associated with each VOQ; queues 31-0 */
+#define QM_REG_VOQQMASK_9_LSB					 0x168288
+/* [RW 32] The physical queue number associated with each VOQ; queues 95-64 */
+#define QM_REG_VOQQMASK_9_LSB_EXT_A				 0x16e56c
+/* [RW 32] The physical queue number associated with each VOQ; queues 127-96 */
+#define QM_REG_VOQQMASK_9_MSB_EXT_A				 0x16e570
+/* [RW 32] WRR weights */
+#define QM_REG_WRRWEIGHTS_0					 0x16880c
+#define QM_REG_WRRWEIGHTS_1					 0x168810
+#define QM_REG_WRRWEIGHTS_10					 0x168814
+#define QM_REG_WRRWEIGHTS_11					 0x168818
+#define QM_REG_WRRWEIGHTS_12					 0x16881c
+#define QM_REG_WRRWEIGHTS_13					 0x168820
+#define QM_REG_WRRWEIGHTS_14					 0x168824
+#define QM_REG_WRRWEIGHTS_15					 0x168828
+#define QM_REG_WRRWEIGHTS_16					 0x16e000
+#define QM_REG_WRRWEIGHTS_17					 0x16e004
+#define QM_REG_WRRWEIGHTS_18					 0x16e008
+#define QM_REG_WRRWEIGHTS_19					 0x16e00c
+#define QM_REG_WRRWEIGHTS_2					 0x16882c
+#define QM_REG_WRRWEIGHTS_20					 0x16e010
+#define QM_REG_WRRWEIGHTS_21					 0x16e014
+#define QM_REG_WRRWEIGHTS_22					 0x16e018
+#define QM_REG_WRRWEIGHTS_23					 0x16e01c
+#define QM_REG_WRRWEIGHTS_24					 0x16e020
+#define QM_REG_WRRWEIGHTS_25					 0x16e024
+#define QM_REG_WRRWEIGHTS_26					 0x16e028
+#define QM_REG_WRRWEIGHTS_27					 0x16e02c
+#define QM_REG_WRRWEIGHTS_28					 0x16e030
+#define QM_REG_WRRWEIGHTS_29					 0x16e034
+#define QM_REG_WRRWEIGHTS_3					 0x168830
+#define QM_REG_WRRWEIGHTS_30					 0x16e038
+#define QM_REG_WRRWEIGHTS_31					 0x16e03c
+#define QM_REG_WRRWEIGHTS_4					 0x168834
+#define QM_REG_WRRWEIGHTS_5					 0x168838
+#define QM_REG_WRRWEIGHTS_6					 0x16883c
+#define QM_REG_WRRWEIGHTS_7					 0x168840
+#define QM_REG_WRRWEIGHTS_8					 0x168844
+#define QM_REG_WRRWEIGHTS_9					 0x168848
+/* [R 6] Keeps the fill level of the fifo from write client 1 */
+#define QM_REG_XQM_WRC_FIFOLVL					 0x168000
+/* [W 1] Reset the parity interrupt */
+#define SEM_FAST_REG_PARITY_RST					 0x18840
+#define SRC_REG_COUNTFREE0					 0x40500
+/* [RW 1] If clear the searcher is compatible with E1 A0 - supports only
+   two ports. If set the searcher supports 8 functions. */
+#define SRC_REG_E1HMF_ENABLE					 0x404cc
+#define SRC_REG_FIRSTFREE0					 0x40510
+#define SRC_REG_KEYRSS0_0					 0x40408
+#define SRC_REG_KEYRSS0_7					 0x40424
+#define SRC_REG_KEYRSS1_9					 0x40454
+#define SRC_REG_KEYSEARCH_0					 0x40458
+#define SRC_REG_KEYSEARCH_1					 0x4045c
+#define SRC_REG_KEYSEARCH_2					 0x40460
+#define SRC_REG_KEYSEARCH_3					 0x40464
+#define SRC_REG_KEYSEARCH_4					 0x40468
+#define SRC_REG_KEYSEARCH_5					 0x4046c
+#define SRC_REG_KEYSEARCH_6					 0x40470
+#define SRC_REG_KEYSEARCH_7					 0x40474
+#define SRC_REG_KEYSEARCH_8					 0x40478
+#define SRC_REG_KEYSEARCH_9					 0x4047c
+#define SRC_REG_LASTFREE0					 0x40530
+#define SRC_REG_NUMBER_HASH_BITS0				 0x40400
+/* [RW 1] Reset internal state machines. */
+#define SRC_REG_SOFT_RST					 0x4049c
+/* [R 3] Interrupt register #0 read */
+#define SRC_REG_SRC_INT_STS					 0x404ac
+/* [RW 3] Parity mask register #0 read/write */
+#define SRC_REG_SRC_PRTY_MASK					 0x404c8
+/* [R 3] Parity register #0 read */
+#define SRC_REG_SRC_PRTY_STS					 0x404bc
+/* [RC 3] Parity register #0 read clear */
+#define SRC_REG_SRC_PRTY_STS_CLR				 0x404c0
+/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
+#define TCM_REG_CAM_OCCUP					 0x5017c
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define TCM_REG_CDU_AG_RD_IFEN					 0x50034
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define TCM_REG_CDU_AG_WR_IFEN					 0x50030
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define TCM_REG_CDU_SM_RD_IFEN					 0x5003c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define TCM_REG_CDU_SM_WR_IFEN					 0x50038
+/* [RW 4] CFC output initial credit. Max credit available - 15. A write
+   sets the initial credit value; a read returns the current value of the
+   credit counter. Must be initialized to 1 at start-up. */
+#define TCM_REG_CFC_INIT_CRD					 0x50204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CP_WEIGHT					 0x500c0
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_CSEM_IFEN					 0x5002c
+/* [RC 1] Message length mismatch (relative to last indication) at the In#9
+   interface. */
+#define TCM_REG_CSEM_LENGTH_MIS 				 0x50174
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_CSEM_WEIGHT					 0x500bc
+/* [RW 8] The Event ID in case ErrorFlg is set in the input message. */
+#define TCM_REG_ERR_EVNT_ID					 0x500a0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define TCM_REG_ERR_TCM_HDR					 0x5009c
+/* [RW 8] The Event ID for Timers expiration. */
+#define TCM_REG_EXPR_EVNT_ID					 0x500a4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. A write
+   sets the initial credit value; a read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC0_INIT_CRD					 0x5020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. A write
+   sets the initial credit value; a read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define TCM_REG_FIC1_INIT_CRD					 0x50210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~tcm_registers_gr_ag_pr.gr_ag_pr;
+   ~tcm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~tcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define TCM_REG_GR_ARB_TYPE					 0x50114
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel is the
+   complement of the other 3 groups. */
+#define TCM_REG_GR_LD0_PR					 0x5011c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel is the
+   complement of the other 3 groups. */
+#define TCM_REG_GR_LD1_PR					 0x50120
+/* [RW 4] The number of double REG-pairs; loaded from the STORM context and
+   sent to STORM; for a specific connection type. The double REG-pairs are
+   used to align to STORM context row size of 128 bits. The offset of these
+   data in the STORM context is always 0. Index _i stands for the connection
+   type (one of 16). */
+#define TCM_REG_N_SM_CTX_LD_0					 0x50050
+#define TCM_REG_N_SM_CTX_LD_1					 0x50054
+#define TCM_REG_N_SM_CTX_LD_2					 0x50058
+#define TCM_REG_N_SM_CTX_LD_3					 0x5005c
+#define TCM_REG_N_SM_CTX_LD_4					 0x50060
+#define TCM_REG_N_SM_CTX_LD_5					 0x50064
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_PBF_IFEN					 0x50024
+/* [RC 1] Message length mismatch (relative to last indication) at the In#7
+   interface. */
+#define TCM_REG_PBF_LENGTH_MIS					 0x5016c
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PBF_WEIGHT					 0x500b4
+#define TCM_REG_PHYS_QNUM0_0					 0x500e0
+#define TCM_REG_PHYS_QNUM0_1					 0x500e4
+#define TCM_REG_PHYS_QNUM1_0					 0x500e8
+#define TCM_REG_PHYS_QNUM1_1					 0x500ec
+#define TCM_REG_PHYS_QNUM2_0					 0x500f0
+#define TCM_REG_PHYS_QNUM2_1					 0x500f4
+#define TCM_REG_PHYS_QNUM3_0					 0x500f8
+#define TCM_REG_PHYS_QNUM3_1					 0x500fc
+/* [RW 1] Input prs Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_PRS_IFEN					 0x50020
+/* [RC 1] Message length mismatch (relative to last indication) at the In#6
+   interface. */
+#define TCM_REG_PRS_LENGTH_MIS					 0x50168
+/* [RW 3] The weight of the input prs in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_PRS_WEIGHT					 0x500b0
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define TCM_REG_STOP_EVNT_ID					 0x500a8
+/* [RC 1] Message length mismatch (relative to last indication) at the STORM
+   interface. */
+#define TCM_REG_STORM_LENGTH_MIS				 0x50160
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_STORM_TCM_IFEN					 0x50010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_STORM_WEIGHT					 0x500ac
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_CFC_IFEN					 0x50040
+/* [RW 11] Interrupt mask register #0 read/write */
+#define TCM_REG_TCM_INT_MASK					 0x501dc
+/* [R 11] Interrupt register #0 read */
+#define TCM_REG_TCM_INT_STS					 0x501d0
+/* [RW 27] Parity mask register #0 read/write */
+#define TCM_REG_TCM_PRTY_MASK					 0x501ec
+/* [R 27] Parity register #0 read */
+#define TCM_REG_TCM_PRTY_STS					 0x501e0
+/* [RC 27] Parity register #0 read clear */
+#define TCM_REG_TCM_PRTY_STS_CLR				 0x501e4
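+/* Illustrative sketch; [RC] registers such as TCM_REG_TCM_PRTY_STS_CLR are
+ * read-clear: a single read returns the latched bits and clears them. A
+ * hypothetical helper, assuming bnx2x.h's REG_RD(): */
+static inline u32 tcm_prty_latch_example(struct bnx2x *bp)
+{
+	/* Samples and clears the 27 latched parity bits in one access. */
+	return REG_RD(bp, TCM_REG_TCM_PRTY_STS_CLR);
+}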
+/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   It is used to determine the number of AG context REG-pairs written back
+   when the input message Reg1WbFlg isn't set. */
+#define TCM_REG_TCM_REG0_SZ					 0x500d8
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_STORM0_IFEN 				 0x50004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_STORM1_IFEN 				 0x50008
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TCM_TQM_IFEN					 0x5000c
+/* [RW 1] If set; the Q index received from the QM is inserted to event ID. */
+#define TCM_REG_TCM_TQM_USE_Q					 0x500d4
+/* [RW 28] The CM header for Timers expiration command. */
+#define TCM_REG_TM_TCM_HDR					 0x50098
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_TM_TCM_IFEN					 0x5001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TM_WEIGHT					 0x500d0
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define TCM_REG_TQM_INIT_CRD					 0x5021c
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_P_WEIGHT					 0x500c8
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TQM_S_WEIGHT					 0x500cc
+/* [RW 28] The CM header value for QM request (primary). */
+#define TCM_REG_TQM_TCM_HDR_P					 0x50090
+/* [RW 28] The CM header value for QM request (secondary). */
+#define TCM_REG_TQM_TCM_HDR_S					 0x50094
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TQM_TCM_IFEN					 0x50014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define TCM_REG_TSDM_IFEN					 0x50018
+/* [RC 1] Message length mismatch (relative to last indication) at the SDM
+   interface. */
+#define TCM_REG_TSDM_LENGTH_MIS 				 0x50164
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_TSDM_WEIGHT					 0x500c4
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define TCM_REG_USEM_IFEN					 0x50028
+/* [RC 1] Message length mismatch (relative to last indication) at the In#8
+   interface. */
+#define TCM_REG_USEM_LENGTH_MIS 				 0x50170
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define TCM_REG_USEM_WEIGHT					 0x500b8
+/* [RW 21] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - length of the message; [15:6] - message
+   pointer; [20:16] - next pointer. */
+#define TCM_REG_XX_DESCR_TABLE					 0x50280
+#define TCM_REG_XX_DESCR_TABLE_SIZE				 29
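+/* Illustrative sketch; a hypothetical decode of one TCM XX descriptor word
+ * using the field layout documented above ([5:0] message length; [15:6]
+ * message pointer; [20:16] next pointer): */
+struct tcm_xx_descr_example {
+	u8	next_ptr;		/* bits [20:16] */
+	u16	msg_ptr;		/* bits [15:6] */
+	u8	msg_len;		/* bits [5:0] */
+};
+
+static inline struct tcm_xx_descr_example tcm_xx_descr_decode(u32 val)
+{
+	struct tcm_xx_descr_example d = {
+		.next_ptr = (val >> 16) & 0x1f,
+		.msg_ptr  = (val >> 6) & 0x3ff,
+		.msg_len  = val & 0x3f,
+	};
+	return d;
+}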
+/* [R 6] Used to read the value of the XX protection Free counter. */
+#define TCM_REG_XX_FREE 					 0x50178
+/* [RW 6] Initial value for the credit counter responsible for filling the
+   Input Stage XX protection buffer with XX protection pending messages. Max
+   credit available - 127. Write writes the initial credit value; read
+   returns the current value of the credit counter. Must be initialized to
+   19 at start-up. */
+#define TCM_REG_XX_INIT_CRD					 0x50220
+/* [RW 6] Maximum link list size (messages locked) per connection in the XX
+   protection. */
+#define TCM_REG_XX_MAX_LL_SZ					 0x50044
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+   protection. ~tcm_registers_xx_free.xx_free is read on read. */
+#define TCM_REG_XX_MSG_NUM					 0x50224
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define TCM_REG_XX_OVFL_EVNT_ID 				 0x50048
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+   header pointer. */
+#define TCM_REG_XX_TABLE					 0x50240
+/* [RW 4] Load value for cfc ac credit cnt. */
+#define TM_REG_CFC_AC_CRDCNT_VAL				 0x164208
+/* [RW 4] Load value for cfc cld credit cnt. */
+#define TM_REG_CFC_CLD_CRDCNT_VAL				 0x164210
+/* [RW 8] Client0 context region. */
+#define TM_REG_CL0_CONT_REGION					 0x164030
+/* [RW 8] Client1 context region. */
+#define TM_REG_CL1_CONT_REGION					 0x164034
+/* [RW 8] Client2 context region. */
+#define TM_REG_CL2_CONT_REGION					 0x164038
+/* [RW 2] Client in High priority client number. */
+#define TM_REG_CLIN_PRIOR0_CLIENT				 0x164024
+/* [RW 4] Load value for clout0 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT0_VAL				 0x164220
+/* [RW 4] Load value for clout1 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT1_VAL				 0x164228
+/* [RW 4] Load value for clout2 cred cnt. */
+#define TM_REG_CLOUT_CRDCNT2_VAL				 0x164230
+/* [RW 1] Enable client0 input. */
+#define TM_REG_EN_CL0_INPUT					 0x164008
+/* [RW 1] Enable client1 input. */
+#define TM_REG_EN_CL1_INPUT					 0x16400c
+/* [RW 1] Enable client2 input. */
+#define TM_REG_EN_CL2_INPUT					 0x164010
+#define TM_REG_EN_LINEAR0_TIMER 				 0x164014
+/* [RW 1] Enable real time counter. */
+#define TM_REG_EN_REAL_TIME_CNT 				 0x1640d8
+/* [RW 1] Enable for Timers state machines. */
+#define TM_REG_EN_TIMERS					 0x164000
+/* [RW 4] Load value for expiration credit cnt. CFC max number of
+   outstanding load requests for timers (expiration) context loading. */
+#define TM_REG_EXP_CRDCNT_VAL					 0x164238
+/* [RW 32] Linear0 logic address. */
+#define TM_REG_LIN0_LOGIC_ADDR					 0x164240
+/* [RW 18] Linear0 Max active cid (in banks of 32 entries). */
+#define TM_REG_LIN0_MAX_ACTIVE_CID				 0x164048
+/* [ST 16] Linear0 Number of scans counter. */
+#define TM_REG_LIN0_NUM_SCANS					 0x1640a0
+/* [WB 64] Linear0 phy address. */
+#define TM_REG_LIN0_PHY_ADDR					 0x164270
+/* [RW 1] Linear0 physical address valid. */
+#define TM_REG_LIN0_PHY_ADDR_VALID				 0x164248
+#define TM_REG_LIN0_SCAN_ON					 0x1640d0
+/* [RW 24] Linear0 array scan timeout. */
+#define TM_REG_LIN0_SCAN_TIME					 0x16403c
+#define TM_REG_LIN0_VNIC_UC					 0x164128
+/* [RW 32] Linear1 logic address. */
+#define TM_REG_LIN1_LOGIC_ADDR					 0x164250
+/* [WB 64] Linear1 phy address. */
+#define TM_REG_LIN1_PHY_ADDR					 0x164280
+/* [RW 1] Linear1 physical address valid. */
+#define TM_REG_LIN1_PHY_ADDR_VALID				 0x164258
+/* [RW 6] Linear timer set_clear fifo threshold. */
+#define TM_REG_LIN_SETCLR_FIFO_ALFULL_THR			 0x164070
+/* [RW 2] Load value for pci arbiter credit cnt. */
+#define TM_REG_PCIARB_CRDCNT_VAL				 0x164260
+/* [RW 20] The number of hardware cycles in each timer tick. */
+#define TM_REG_TIMER_TICK_SIZE					 0x16401c
+/* [RW 8] Timers Context region. */
+#define TM_REG_TM_CONTEXT_REGION				 0x164044
+/* [RW 1] Interrupt mask register #0 read/write */
+#define TM_REG_TM_INT_MASK					 0x1640fc
+/* [R 1] Interrupt register #0 read */
+#define TM_REG_TM_INT_STS					 0x1640f0
+/* [RW 7] Parity mask register #0 read/write */
+#define TM_REG_TM_PRTY_MASK					 0x16410c
+/* [RC 7] Parity register #0 read clear */
+#define TM_REG_TM_PRTY_STS_CLR					 0x164104
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_EVENT_0				 0x42038
+#define TSDM_REG_AGG_INT_EVENT_1				 0x4203c
+#define TSDM_REG_AGG_INT_EVENT_2				 0x42040
+#define TSDM_REG_AGG_INT_EVENT_3				 0x42044
+#define TSDM_REG_AGG_INT_EVENT_4				 0x42048
+/* [RW 1] The T bit for aggregated interrupt 0 */
+#define TSDM_REG_AGG_INT_T_0					 0x420b8
+#define TSDM_REG_AGG_INT_T_1					 0x420bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define TSDM_REG_CFC_RSP_START_ADDR				 0x42008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define TSDM_REG_CMP_COUNTER_MAX0				 0x4201c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define TSDM_REG_CMP_COUNTER_MAX1				 0x42020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define TSDM_REG_CMP_COUNTER_MAX2				 0x42024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define TSDM_REG_CMP_COUNTER_MAX3				 0x42028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define TSDM_REG_CMP_COUNTER_START_ADDR 			 0x4200c
+#define TSDM_REG_ENABLE_IN1					 0x42238
+#define TSDM_REG_ENABLE_IN2					 0x4223c
+#define TSDM_REG_ENABLE_OUT1					 0x42240
+#define TSDM_REG_ENABLE_OUT2					 0x42244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define TSDM_REG_INIT_CREDIT_PXP_CTRL				 0x424bc
+/* [ST 32] The number of ACK after placement messages received */
+#define TSDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0x4227c
+/* [ST 32] The number of packet end messages received from the parser */
+#define TSDM_REG_NUM_OF_PKT_END_MSG				 0x42274
+/* [ST 32] The number of requests received from the pxp async if */
+#define TSDM_REG_NUM_OF_PXP_ASYNC_REQ				 0x42278
+/* [ST 32] The number of commands received in queue 0 */
+#define TSDM_REG_NUM_OF_Q0_CMD					 0x42248
+/* [ST 32] The number of commands received in queue 10 */
+#define TSDM_REG_NUM_OF_Q10_CMD 				 0x4226c
+/* [ST 32] The number of commands received in queue 11 */
+#define TSDM_REG_NUM_OF_Q11_CMD 				 0x42270
+/* [ST 32] The number of commands received in queue 1 */
+#define TSDM_REG_NUM_OF_Q1_CMD					 0x4224c
+/* [ST 32] The number of commands received in queue 3 */
+#define TSDM_REG_NUM_OF_Q3_CMD					 0x42250
+/* [ST 32] The number of commands received in queue 4 */
+#define TSDM_REG_NUM_OF_Q4_CMD					 0x42254
+/* [ST 32] The number of commands received in queue 5 */
+#define TSDM_REG_NUM_OF_Q5_CMD					 0x42258
+/* [ST 32] The number of commands received in queue 6 */
+#define TSDM_REG_NUM_OF_Q6_CMD					 0x4225c
+/* [ST 32] The number of commands received in queue 7 */
+#define TSDM_REG_NUM_OF_Q7_CMD					 0x42260
+/* [ST 32] The number of commands received in queue 8 */
+#define TSDM_REG_NUM_OF_Q8_CMD					 0x42264
+/* [ST 32] The number of commands received in queue 9 */
+#define TSDM_REG_NUM_OF_Q9_CMD					 0x42268
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define TSDM_REG_PCK_END_MSG_START_ADDR 			 0x42014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define TSDM_REG_Q_COUNTER_START_ADDR				 0x42010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define TSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0x42548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_PARSER_EMPTY				 0x42550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define TSDM_REG_SYNC_SYNC_EMPTY				 0x42558
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~tsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define TSDM_REG_TIMER_TICK					 0x42000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSDM_REG_TSDM_INT_MASK_0				 0x4229c
+#define TSDM_REG_TSDM_INT_MASK_1				 0x422ac
+/* [R 32] Interrupt register #0 read */
+#define TSDM_REG_TSDM_INT_STS_0 				 0x42290
+#define TSDM_REG_TSDM_INT_STS_1 				 0x422a0
+/* [RW 11] Parity mask register #0 read/write */
+#define TSDM_REG_TSDM_PRTY_MASK 				 0x422bc
+/* [R 11] Parity register #0 read */
+#define TSDM_REG_TSDM_PRTY_STS					 0x422b0
+/* [RC 11] Parity register #0 read clear */
+#define TSDM_REG_TSDM_PRTY_STS_CLR				 0x422b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define TSEM_REG_ARB_CYCLE_SIZE 				 0x180034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define TSEM_REG_ARB_ELEMENT0					 0x180020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~tsem_registers_arb_element0.arb_element0 */
+#define TSEM_REG_ARB_ELEMENT1					 0x180024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~tsem_registers_arb_element0.arb_element0
+   and ~tsem_registers_arb_element1.arb_element1 */
+#define TSEM_REG_ARB_ELEMENT2					 0x180028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2. Must
+   not be equal to register ~tsem_registers_arb_element0.arb_element0 and
+   ~tsem_registers_arb_element1.arb_element1 and
+   ~tsem_registers_arb_element2.arb_element2 */
+#define TSEM_REG_ARB_ELEMENT3					 0x18002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~tsem_registers_arb_element0.arb_element0
+   and ~tsem_registers_arb_element1.arb_element1 and
+   ~tsem_registers_arb_element2.arb_element2 and
+   ~tsem_registers_arb_element3.arb_element3 */
+#define TSEM_REG_ARB_ELEMENT4					 0x180030
+#define TSEM_REG_ENABLE_IN					 0x1800a4
+#define TSEM_REG_ENABLE_OUT					 0x1800a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define TSEM_REG_FAST_MEMORY					 0x1a0000
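+/* Illustrative sketch; as documented above, a SEM_FAST register is reached
+ * by adding the fast-memory window base to that register's own offset. A
+ * hypothetical accessor, where sem_fast_off is an assumed per-register
+ * offset taken from appendix B: */
+static inline u32 tsem_fast_rd_example(struct bnx2x *bp, u32 sem_fast_off)
+{
+	/* Window base plus per-register offset gives the byte address. */
+	return REG_RD(bp, TSEM_REG_FAST_MEMORY + sem_fast_off);
+}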
+/* [RW 1] Disables input messages from FIC0. May be updated during run-time
+   by the microcode. */
+#define TSEM_REG_FIC0_DISABLE					 0x180224
+/* [RW 1] Disables input messages from FIC1. May be updated during run-time
+   by the microcode. */
+#define TSEM_REG_FIC1_DISABLE					 0x180234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work. */
+#define TSEM_REG_INT_TABLE					 0x180400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define TSEM_REG_MSG_NUM_FIC0					 0x180000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define TSEM_REG_MSG_NUM_FIC1					 0x180004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define TSEM_REG_MSG_NUM_FOC0					 0x180008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define TSEM_REG_MSG_NUM_FOC1					 0x18000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define TSEM_REG_MSG_NUM_FOC2					 0x180010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define TSEM_REG_MSG_NUM_FOC3					 0x180014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run-time by the microcode. */
+#define TSEM_REG_PAS_DISABLE					 0x18024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define TSEM_REG_PASSIVE_BUFFER 				 0x181000
+/* [WB 46] pram memory. Bit 45 is parity; bits [44:0] are data. */
+#define TSEM_REG_PRAM						 0x1c0000
+/* [R 8] Valid sleeping threads indication; one bit per thread */
+#define TSEM_REG_SLEEP_THREADS_VALID				 0x18026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define TSEM_REG_SLOW_EXT_STORE_EMPTY				 0x1802a0
+/* [RW 8] List of free threads. There is a bit per thread. */
+#define TSEM_REG_THREADS_LIST					 0x1802e4
+/* [RC 32] Parity register #0 read clear */
+#define TSEM_REG_TSEM_PRTY_STS_CLR_0				 0x180118
+#define TSEM_REG_TSEM_PRTY_STS_CLR_1				 0x180128
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define TSEM_REG_TS_0_AS					 0x180038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define TSEM_REG_TS_10_AS					 0x180060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define TSEM_REG_TS_11_AS					 0x180064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define TSEM_REG_TS_12_AS					 0x180068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define TSEM_REG_TS_13_AS					 0x18006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define TSEM_REG_TS_14_AS					 0x180070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define TSEM_REG_TS_15_AS					 0x180074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define TSEM_REG_TS_16_AS					 0x180078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define TSEM_REG_TS_17_AS					 0x18007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define TSEM_REG_TS_18_AS					 0x180080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define TSEM_REG_TS_1_AS					 0x18003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define TSEM_REG_TS_2_AS					 0x180040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define TSEM_REG_TS_3_AS					 0x180044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define TSEM_REG_TS_4_AS					 0x180048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define TSEM_REG_TS_5_AS					 0x18004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define TSEM_REG_TS_6_AS					 0x180050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define TSEM_REG_TS_7_AS					 0x180054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define TSEM_REG_TS_8_AS					 0x180058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define TSEM_REG_TS_9_AS					 0x18005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define TSEM_REG_TSEM_INT_MASK_0				 0x180100
+#define TSEM_REG_TSEM_INT_MASK_1				 0x180110
+/* [R 32] Interrupt register #0 read */
+#define TSEM_REG_TSEM_INT_STS_0 				 0x1800f4
+#define TSEM_REG_TSEM_INT_STS_1 				 0x180104
+/* [RW 32] Parity mask register #0 read/write */
+#define TSEM_REG_TSEM_PRTY_MASK_0				 0x180120
+#define TSEM_REG_TSEM_PRTY_MASK_1				 0x180130
+/* [R 32] Parity register #0 read */
+#define TSEM_REG_TSEM_PRTY_STS_0				 0x180114
+#define TSEM_REG_TSEM_PRTY_STS_1				 0x180124
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define TSEM_REG_VFPF_ERR_NUM					 0x180380
+/* [RW 32] Indirect access to AG context with 32-bit granularity. The bits
+ * [10:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * of LCID100; the RBC address should be 12'ha64. */
+#define UCM_REG_AG_CTX						 0xe2000
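+/* Illustrative sketch; a hypothetical helper composing the 12-bit RBC index
+ * described above (LCID in [7:0]; the REG offset in the bits above it; the
+ * documented REG10/LCID100 example yields 0xa64) and turning it into a byte
+ * offset from UCM_REG_AG_CTX. Treating the index as a 32-bit-word index is
+ * an assumption: */
+static inline u32 ucm_ag_ctx_addr_example(u32 reg_off, u32 lcid)
+{
+	u32 idx = (reg_off << 8) | (lcid & 0xff); /* (10 << 8) | 100 == 0xa64 */
+
+	return UCM_REG_AG_CTX + idx * sizeof(u32); /* dword index -> bytes */
+}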
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define UCM_REG_CAM_OCCUP					 0xe0170
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define UCM_REG_CDU_AG_RD_IFEN					 0xe0038
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define UCM_REG_CDU_AG_WR_IFEN					 0xe0034
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define UCM_REG_CDU_SM_RD_IFEN					 0xe0040
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define UCM_REG_CDU_SM_WR_IFEN					 0xe003c
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 1 at start-up. */
+#define UCM_REG_CFC_INIT_CRD					 0xe0204
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CP_WEIGHT					 0xe00c4
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_CSEM_IFEN					 0xe0028
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the csem interface is detected. */
+#define UCM_REG_CSEM_LENGTH_MIS 				 0xe0160
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_CSEM_WEIGHT					 0xe00b8
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_DORQ_IFEN					 0xe0030
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the dorq interface is detected. */
+#define UCM_REG_DORQ_LENGTH_MIS 				 0xe0168
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_DORQ_WEIGHT					 0xe00c0
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define UCM_REG_ERR_EVNT_ID					 0xe00a4
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define UCM_REG_ERR_UCM_HDR					 0xe00a0
+/* [RW 8] The Event ID for Timers expiration. */
+#define UCM_REG_EXPR_EVNT_ID					 0xe00a8
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC0_INIT_CRD					 0xe020c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define UCM_REG_FIC1_INIT_CRD					 0xe0210
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~ucm_registers_gr_ag_pr.gr_ag_pr;
+   ~ucm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~ucm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define UCM_REG_GR_ARB_TYPE					 0xe0144
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel group is
+   complement to the others. */
+#define UCM_REG_GR_LD0_PR					 0xe014c
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Store channel group is
+   complement to the others. */
+#define UCM_REG_GR_LD1_PR					 0xe0150
+/* [RW 2] The queue index for invalidate counter flag decision. */
+#define UCM_REG_INV_CFLG_Q					 0xe00e4
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+   sent to STORM; for a specific connection type. The double REG-pairs are
+   used in order to align to STORM context row size of 128 bits. The offset
+   of these data in the STORM context is always 0. Index _i stands for the
+   connection type (one of 16). */
+#define UCM_REG_N_SM_CTX_LD_0					 0xe0054
+#define UCM_REG_N_SM_CTX_LD_1					 0xe0058
+#define UCM_REG_N_SM_CTX_LD_2					 0xe005c
+#define UCM_REG_N_SM_CTX_LD_3					 0xe0060
+#define UCM_REG_N_SM_CTX_LD_4					 0xe0064
+#define UCM_REG_N_SM_CTX_LD_5					 0xe0068
+#define UCM_REG_PHYS_QNUM0_0					 0xe0110
+#define UCM_REG_PHYS_QNUM0_1					 0xe0114
+#define UCM_REG_PHYS_QNUM1_0					 0xe0118
+#define UCM_REG_PHYS_QNUM1_1					 0xe011c
+#define UCM_REG_PHYS_QNUM2_0					 0xe0120
+#define UCM_REG_PHYS_QNUM2_1					 0xe0124
+#define UCM_REG_PHYS_QNUM3_0					 0xe0128
+#define UCM_REG_PHYS_QNUM3_1					 0xe012c
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define UCM_REG_STOP_EVNT_ID					 0xe00ac
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the STORM interface is detected. */
+#define UCM_REG_STORM_LENGTH_MIS				 0xe0154
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_STORM_UCM_IFEN					 0xe0010
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_STORM_WEIGHT					 0xe00b0
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 4 at start-up. */
+#define UCM_REG_TM_INIT_CRD					 0xe021c
+/* [RW 28] The CM header for Timers expiration command. */
+#define UCM_REG_TM_UCM_HDR					 0xe009c
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_TM_UCM_IFEN					 0xe001c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TM_WEIGHT					 0xe00d4
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_TSEM_IFEN					 0xe0024
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the tsem interface is detected. */
+#define UCM_REG_TSEM_LENGTH_MIS 				 0xe015c
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_TSEM_WEIGHT					 0xe00b4
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_CFC_IFEN					 0xe0044
+/* [RW 11] Interrupt mask register #0 read/write */
+#define UCM_REG_UCM_INT_MASK					 0xe01d4
+/* [R 11] Interrupt register #0 read */
+#define UCM_REG_UCM_INT_STS					 0xe01c8
+/* [RW 27] Parity mask register #0 read/write */
+#define UCM_REG_UCM_PRTY_MASK					 0xe01e4
+/* [R 27] Parity register #0 read */
+#define UCM_REG_UCM_PRTY_STS					 0xe01d8
+/* [RC 27] Parity register #0 read clear */
+#define UCM_REG_UCM_PRTY_STS_CLR				 0xe01dc
+/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   It is used to determine the number of AG context REG-pairs written back
+   when the Reg1WbFlg isn't set. */
+#define UCM_REG_UCM_REG0_SZ					 0xe00dc
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_STORM0_IFEN 				 0xe0004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_STORM1_IFEN 				 0xe0008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_UCM_TM_IFEN					 0xe0020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UCM_UQM_IFEN					 0xe000c
+/* [RW 1] If set; the Q index received from the QM is inserted to event ID. */
+#define UCM_REG_UCM_UQM_USE_Q					 0xe00d8
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define UCM_REG_UQM_INIT_CRD					 0xe0220
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_P_WEIGHT					 0xe00cc
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_UQM_S_WEIGHT					 0xe00d0
+/* [RW 28] The CM header value for QM request (primary). */
+#define UCM_REG_UQM_UCM_HDR_P					 0xe0094
+/* [RW 28] The CM header value for QM request (secondary). */
+#define UCM_REG_UQM_UCM_HDR_S					 0xe0098
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_UQM_UCM_IFEN					 0xe0014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define UCM_REG_USDM_IFEN					 0xe0018
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the SDM interface is detected. */
+#define UCM_REG_USDM_LENGTH_MIS 				 0xe0158
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_USDM_WEIGHT					 0xe00c8
+/* [RW 1] Input xsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define UCM_REG_XSEM_IFEN					 0xe002c
+/* [RC 1] Set when the message length mismatch (relative to last indication)
+   at the xsem interface is detected. */
+#define UCM_REG_XSEM_LENGTH_MIS 				 0xe0164
+/* [RW 3] The weight of the input xsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define UCM_REG_XSEM_WEIGHT					 0xe00bc
+/* [RW 20] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [14:6] - message
+   pointer; [19:15] - next pointer. */
+#define UCM_REG_XX_DESCR_TABLE					 0xe0280
+#define UCM_REG_XX_DESCR_TABLE_SIZE				 27
+/* [R 6] Used to read the XX protection Free counter. */
+#define UCM_REG_XX_FREE 					 0xe016c
+/* [RW 6] Initial value for the credit counter responsible for filling the
+   Input Stage XX protection buffer with XX protection pending messages.
+   Write writes the initial credit value; read returns the current value of
+   the credit counter. Must be initialized to 12 at start-up. */
+#define UCM_REG_XX_INIT_CRD					 0xe0224
+/* [RW 6] The maximum number of pending messages; which may be stored in XX
+   protection. ~ucm_registers_xx_free.xx_free is read on read. */
+#define UCM_REG_XX_MSG_NUM					 0xe0228
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define UCM_REG_XX_OVFL_EVNT_ID 				 0xe004c
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
+   header pointer. */
+#define UCM_REG_XX_TABLE					 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE			 (0x1<<28)
+#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA			 (0x1<<15)
+#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK			 (0x1<<24)
+#define UMAC_COMMAND_CONFIG_REG_PAD_EN				 (0x1<<5)
+#define UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE			 (0x1<<8)
+#define UMAC_COMMAND_CONFIG_REG_PROMIS_EN			 (0x1<<4)
+#define UMAC_COMMAND_CONFIG_REG_RX_ENA				 (0x1<<1)
+#define UMAC_COMMAND_CONFIG_REG_SW_RESET			 (0x1<<13)
+#define UMAC_COMMAND_CONFIG_REG_TX_ENA				 (0x1<<0)
+#define UMAC_REG_COMMAND_CONFIG					 0x8
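+/* Illustrative sketch; a hypothetical UMAC bring-up using the
+ * COMMAND_CONFIG bits above. umac_base is an assumed block base address,
+ * and the reset-then-enable ordering is an assumption, not a documented
+ * sequence: */
+static inline void umac_enable_example(struct bnx2x *bp, u32 umac_base)
+{
+	u32 val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
+
+	val |= UMAC_COMMAND_CONFIG_REG_SW_RESET;	/* hold MAC in reset */
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+	val &= ~UMAC_COMMAND_CONFIG_REG_SW_RESET;	/* release reset */
+	val |= UMAC_COMMAND_CONFIG_REG_TX_ENA |		/* enable both */
+	       UMAC_COMMAND_CONFIG_REG_RX_ENA;		/* directions */
+	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
+}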
+/* [RW 32] Register bit 0 refers to bit 16 of the MAC address; bit 1 refers
+ * to bit 17 of the MAC address; etc. */
+#define UMAC_REG_MAC_ADDR0					 0xc
+/* [RW 16] Register bit 0 refers to bit 0 of the MAC address; register bit 1
+ * refers to bit 1 of the MAC address; etc. Bits 16 to 31 are reserved. */
+#define UMAC_REG_MAC_ADDR1					 0x10
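+/* Illustrative sketch; a hypothetical helper packing a 6-byte MAC address
+ * into the two registers above. The octet-to-register mapping (the first
+ * two octets in MAC_ADDR1; the remaining four in MAC_ADDR0) is an
+ * assumption drawn from the bit numbering in the comments: */
+static inline void umac_set_mac_example(struct bnx2x *bp, u32 umac_base,
+					const u8 *mac)
+{
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR0,
+	       (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]);
+	/* Bits [31:16] of MAC_ADDR1 are reserved; only [15:0] are used. */
+	REG_WR(bp, umac_base + UMAC_REG_MAC_ADDR1,
+	       (mac[0] << 8) | mac[1]);
+}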
+/* [RW 14] Defines a 14-bit maximum frame length used by the MAC receive
+ * logic to check frames. */
+#define UMAC_REG_MAXFR						 0x14
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define USDM_REG_AGG_INT_EVENT_0				 0xc4038
+#define USDM_REG_AGG_INT_EVENT_1				 0xc403c
+#define USDM_REG_AGG_INT_EVENT_2				 0xc4040
+#define USDM_REG_AGG_INT_EVENT_4				 0xc4048
+#define USDM_REG_AGG_INT_EVENT_5				 0xc404c
+#define USDM_REG_AGG_INT_EVENT_6				 0xc4050
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+   or auto-mask-mode (1) */
+#define USDM_REG_AGG_INT_MODE_0 				 0xc41b8
+#define USDM_REG_AGG_INT_MODE_1 				 0xc41bc
+#define USDM_REG_AGG_INT_MODE_4 				 0xc41c8
+#define USDM_REG_AGG_INT_MODE_5 				 0xc41cc
+#define USDM_REG_AGG_INT_MODE_6 				 0xc41d0
+/* [RW 1] The T bit for aggregated interrupt 5 */
+#define USDM_REG_AGG_INT_T_5					 0xc40cc
+#define USDM_REG_AGG_INT_T_6					 0xc40d0
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define USDM_REG_CFC_RSP_START_ADDR				 0xc4008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define USDM_REG_CMP_COUNTER_MAX0				 0xc401c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define USDM_REG_CMP_COUNTER_MAX1				 0xc4020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define USDM_REG_CMP_COUNTER_MAX2				 0xc4024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define USDM_REG_CMP_COUNTER_MAX3				 0xc4028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define USDM_REG_CMP_COUNTER_START_ADDR 			 0xc400c
+#define USDM_REG_ENABLE_IN1					 0xc4238
+#define USDM_REG_ENABLE_IN2					 0xc423c
+#define USDM_REG_ENABLE_OUT1					 0xc4240
+#define USDM_REG_ENABLE_OUT2					 0xc4244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define USDM_REG_INIT_CREDIT_PXP_CTRL				 0xc44c0
+/* [ST 32] The number of ACK after placement messages received */
+#define USDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0xc4280
+/* [ST 32] The number of packet end messages received from the parser */
+#define USDM_REG_NUM_OF_PKT_END_MSG				 0xc4278
+/* [ST 32] The number of requests received from the pxp async if */
+#define USDM_REG_NUM_OF_PXP_ASYNC_REQ				 0xc427c
+/* [ST 32] The number of commands received in queue 0 */
+#define USDM_REG_NUM_OF_Q0_CMD					 0xc4248
+/* [ST 32] The number of commands received in queue 10 */
+#define USDM_REG_NUM_OF_Q10_CMD 				 0xc4270
+/* [ST 32] The number of commands received in queue 11 */
+#define USDM_REG_NUM_OF_Q11_CMD 				 0xc4274
+/* [ST 32] The number of commands received in queue 1 */
+#define USDM_REG_NUM_OF_Q1_CMD					 0xc424c
+/* [ST 32] The number of commands received in queue 2 */
+#define USDM_REG_NUM_OF_Q2_CMD					 0xc4250
+/* [ST 32] The number of commands received in queue 3 */
+#define USDM_REG_NUM_OF_Q3_CMD					 0xc4254
+/* [ST 32] The number of commands received in queue 4 */
+#define USDM_REG_NUM_OF_Q4_CMD					 0xc4258
+/* [ST 32] The number of commands received in queue 5 */
+#define USDM_REG_NUM_OF_Q5_CMD					 0xc425c
+/* [ST 32] The number of commands received in queue 6 */
+#define USDM_REG_NUM_OF_Q6_CMD					 0xc4260
+/* [ST 32] The number of commands received in queue 7 */
+#define USDM_REG_NUM_OF_Q7_CMD					 0xc4264
+/* [ST 32] The number of commands received in queue 8 */
+#define USDM_REG_NUM_OF_Q8_CMD					 0xc4268
+/* [ST 32] The number of commands received in queue 9 */
+#define USDM_REG_NUM_OF_Q9_CMD					 0xc426c
+/* [RW 13] The start address in the internal RAM for the packet end message */
+#define USDM_REG_PCK_END_MSG_START_ADDR 			 0xc4014
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define USDM_REG_Q_COUNTER_START_ADDR				 0xc4010
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define USDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0xc4550
+/* [R 1] parser fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_PARSER_EMPTY				 0xc4558
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define USDM_REG_SYNC_SYNC_EMPTY				 0xc4560
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~usdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define USDM_REG_TIMER_TICK					 0xc4000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USDM_REG_USDM_INT_MASK_0				 0xc42a0
+#define USDM_REG_USDM_INT_MASK_1				 0xc42b0
+/* [R 32] Interrupt register #0 read */
+#define USDM_REG_USDM_INT_STS_0 				 0xc4294
+#define USDM_REG_USDM_INT_STS_1 				 0xc42a4
+/* [RW 11] Parity mask register #0 read/write */
+#define USDM_REG_USDM_PRTY_MASK 				 0xc42c0
+/* [R 11] Parity register #0 read */
+#define USDM_REG_USDM_PRTY_STS					 0xc42b4
+/* [RC 11] Parity register #0 read clear */
+#define USDM_REG_USDM_PRTY_STS_CLR				 0xc42b8
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define USEM_REG_ARB_CYCLE_SIZE 				 0x300034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define USEM_REG_ARB_ELEMENT0					 0x300020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~usem_registers_arb_element0.arb_element0 */
+#define USEM_REG_ARB_ELEMENT1					 0x300024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~usem_registers_arb_element0.arb_element0
+   and ~usem_registers_arb_element1.arb_element1 */
+#define USEM_REG_ARB_ELEMENT2					 0x300028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2. Must
+   not be equal to register ~usem_registers_arb_element0.arb_element0 and
+   ~usem_registers_arb_element1.arb_element1 and
+   ~usem_registers_arb_element2.arb_element2 */
+#define USEM_REG_ARB_ELEMENT3					 0x30002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- foc0; 1-fic1; 2-sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~usem_registers_arb_element0.arb_element0
+   and ~usem_registers_arb_element1.arb_element1 and
+   ~usem_registers_arb_element2.arb_element2 and
+   ~usem_registers_arb_element3.arb_element3 */
+#define USEM_REG_ARB_ELEMENT4					 0x300030
+#define USEM_REG_ENABLE_IN					 0x3000a4
+#define USEM_REG_ENABLE_OUT					 0x3000a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define USEM_REG_FAST_MEMORY					 0x320000
+/* [RW 1] Disables input messages from FIC0. May be updated during run-time
+   by the microcode. */
+#define USEM_REG_FIC0_DISABLE					 0x300224
+/* [RW 1] Disables input messages from FIC1. May be updated during run-time
+   by the microcode. */
+#define USEM_REG_FIC1_DISABLE					 0x300234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work. */
+#define USEM_REG_INT_TABLE					 0x300400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define USEM_REG_MSG_NUM_FIC0					 0x300000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define USEM_REG_MSG_NUM_FIC1					 0x300004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define USEM_REG_MSG_NUM_FOC0					 0x300008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define USEM_REG_MSG_NUM_FOC1					 0x30000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define USEM_REG_MSG_NUM_FOC2					 0x300010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define USEM_REG_MSG_NUM_FOC3					 0x300014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run-time by the microcode. */
+#define USEM_REG_PAS_DISABLE					 0x30024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define USEM_REG_PASSIVE_BUFFER 				 0x302000
+/* [WB 46] pram memory. Bit 45 is parity; bits [44:0] are data. */
+#define USEM_REG_PRAM						 0x340000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define USEM_REG_SLEEP_THREADS_VALID				 0x30026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define USEM_REG_SLOW_EXT_STORE_EMPTY				 0x3002a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define USEM_REG_THREADS_LIST					 0x3002e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define USEM_REG_TS_0_AS					 0x300038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define USEM_REG_TS_10_AS					 0x300060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define USEM_REG_TS_11_AS					 0x300064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define USEM_REG_TS_12_AS					 0x300068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define USEM_REG_TS_13_AS					 0x30006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define USEM_REG_TS_14_AS					 0x300070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define USEM_REG_TS_15_AS					 0x300074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define USEM_REG_TS_16_AS					 0x300078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define USEM_REG_TS_17_AS					 0x30007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define USEM_REG_TS_18_AS					 0x300080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define USEM_REG_TS_1_AS					 0x30003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define USEM_REG_TS_2_AS					 0x300040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define USEM_REG_TS_3_AS					 0x300044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define USEM_REG_TS_4_AS					 0x300048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define USEM_REG_TS_5_AS					 0x30004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define USEM_REG_TS_6_AS					 0x300050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define USEM_REG_TS_7_AS					 0x300054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define USEM_REG_TS_8_AS					 0x300058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define USEM_REG_TS_9_AS					 0x30005c
+/* [RW 32] Interrupt mask register #0 read/write */
+#define USEM_REG_USEM_INT_MASK_0				 0x300110
+#define USEM_REG_USEM_INT_MASK_1				 0x300120
+/* [R 32] Interrupt register #0 read */
+#define USEM_REG_USEM_INT_STS_0 				 0x300104
+#define USEM_REG_USEM_INT_STS_1 				 0x300114
+/* [RW 32] Parity mask register #0 read/write */
+#define USEM_REG_USEM_PRTY_MASK_0				 0x300130
+#define USEM_REG_USEM_PRTY_MASK_1				 0x300140
+/* [R 32] Parity register #0 read */
+#define USEM_REG_USEM_PRTY_STS_0				 0x300124
+#define USEM_REG_USEM_PRTY_STS_1				 0x300134
+/* [RC 32] Parity register #0 read clear */
+#define USEM_REG_USEM_PRTY_STS_CLR_0				 0x300128
+#define USEM_REG_USEM_PRTY_STS_CLR_1				 0x300138
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
+ * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
+#define USEM_REG_VFPF_ERR_NUM					 0x300380
+#define VFC_MEMORIES_RST_REG_CAM_RST				 (0x1<<0)
+#define VFC_MEMORIES_RST_REG_RAM_RST				 (0x1<<1)
+#define VFC_REG_MEMORIES_RST					 0x1943c
+/* [RW 32] Indirect access to AG context with 32-bit granularity. The bits
+ * [12:8] of the address should be the offset within the accessed LCID
+ * context; the bits [7:0] are the accessed LCID. Example: to write to REG10
+ * of LCID100; the RBC address should be 13'ha64. */
+#define XCM_REG_AG_CTX						 0x28000
+/* [RW 2] The queue index for registration on Aux1 counter flag. */
+#define XCM_REG_AUX1_Q						 0x20134
+/* [RW 2] The queue index to register to for each decision rule. */
+#define XCM_REG_AUX_CNT_FLG_Q_19				 0x201b0
+/* [R 5] Used to read the XX protection CAM occupancy counter. */
+#define XCM_REG_CAM_OCCUP					 0x20244
+/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define XCM_REG_CDU_AG_RD_IFEN					 0x20044
+/* [RW 1] CDU AG write Interface enable. If 0 - the request and valid input
+   are disregarded; all other signals are treated as usual; if 1 - normal
+   activity. */
+#define XCM_REG_CDU_AG_WR_IFEN					 0x20040
+/* [RW 1] CDU STORM read Interface enable. If 0 - the request input is
+   disregarded; valid output is deasserted; all other signals are treated as
+   usual; if 1 - normal activity. */
+#define XCM_REG_CDU_SM_RD_IFEN					 0x2004c
+/* [RW 1] CDU STORM write Interface enable. If 0 - the request and valid
+   input is disregarded; all other signals are treated as usual; if 1 -
+   normal activity. */
+#define XCM_REG_CDU_SM_WR_IFEN					 0x20048
+/* [RW 4] CFC output initial credit. Max credit available - 15. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 1 at start-up. */
+#define XCM_REG_CFC_INIT_CRD					 0x20404
+/* [RW 3] The weight of the CP input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CP_WEIGHT					 0x200dc
+/* [RW 1] Input csem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_CSEM_IFEN					 0x20028
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the csem interface. */
+#define XCM_REG_CSEM_LENGTH_MIS 				 0x20228
+/* [RW 3] The weight of the input csem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_CSEM_WEIGHT					 0x200c4
+/* [RW 1] Input dorq Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_DORQ_IFEN					 0x20030
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the dorq interface. */
+#define XCM_REG_DORQ_LENGTH_MIS 				 0x20230
+/* [RW 3] The weight of the input dorq in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_DORQ_WEIGHT					 0x200cc
+/* [RW 8] The Event ID in case the ErrorFlg input message bit is set. */
+#define XCM_REG_ERR_EVNT_ID					 0x200b0
+/* [RW 28] The CM erroneous header for QM and Timers formatting. */
+#define XCM_REG_ERR_XCM_HDR					 0x200ac
+/* [RW 8] The Event ID for Timers expiration. */
+#define XCM_REG_EXPR_EVNT_ID					 0x200b4
+/* [RW 8] FIC0 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC0_INIT_CRD					 0x2040c
+/* [RW 8] FIC1 output initial credit. Max credit available - 255. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 64 at start-up. */
+#define XCM_REG_FIC1_INIT_CRD					 0x20410
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_0				 0x20118
+#define XCM_REG_GLB_DEL_ACK_MAX_CNT_1				 0x2011c
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_0				 0x20108
+#define XCM_REG_GLB_DEL_ACK_TMR_VAL_1				 0x2010c
+/* [RW 1] Arbitration between Input Arbiter groups: 0 - fair Round-Robin; 1
+   - strict priority defined by ~xcm_registers_gr_ag_pr.gr_ag_pr;
+   ~xcm_registers_gr_ld0_pr.gr_ld0_pr and
+   ~xcm_registers_gr_ld1_pr.gr_ld1_pr. */
+#define XCM_REG_GR_ARB_TYPE					 0x2020c
+/* [RW 2] Load (FIC0) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Channel group is the
+   complement of the other 3 groups. */
+#define XCM_REG_GR_LD0_PR					 0x20214
+/* [RW 2] Load (FIC1) channel group priority. The lowest priority is 0; the
+   highest priority is 3. It is supposed that the Channel group is the
+   complement of the other 3 groups. */
+#define XCM_REG_GR_LD1_PR					 0x20218
+/* [RW 1] Input nig0 Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG0_IFEN					 0x20038
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the nig0 interface. */
+#define XCM_REG_NIG0_LENGTH_MIS 				 0x20238
+/* [RW 3] The weight of the input nig0 in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_NIG0_WEIGHT					 0x200d4
+/* [RW 1] Input nig1 Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_NIG1_IFEN					 0x2003c
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the nig1 interface. */
+#define XCM_REG_NIG1_LENGTH_MIS 				 0x2023c
+/* [RW 5] The number of double REG-pairs; loaded from the STORM context and
+   sent to STORM; for a specific connection type. The double REG-pairs are
+   used in order to align to STORM context row size of 128 bits. The offset
+   of these data in the STORM context is always 0. Index _i stands for the
+   connection type (one of 16). */
+#define XCM_REG_N_SM_CTX_LD_0					 0x20060
+#define XCM_REG_N_SM_CTX_LD_1					 0x20064
+#define XCM_REG_N_SM_CTX_LD_2					 0x20068
+#define XCM_REG_N_SM_CTX_LD_3					 0x2006c
+#define XCM_REG_N_SM_CTX_LD_4					 0x20070
+#define XCM_REG_N_SM_CTX_LD_5					 0x20074
+/* [RW 1] Input pbf Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_PBF_IFEN					 0x20034
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the pbf interface. */
+#define XCM_REG_PBF_LENGTH_MIS					 0x20234
+/* [RW 3] The weight of the input pbf in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_PBF_WEIGHT					 0x200d0
+#define XCM_REG_PHYS_QNUM3_0					 0x20100
+#define XCM_REG_PHYS_QNUM3_1					 0x20104
+/* [RW 8] The Event ID for Timers formatting in case of stop done. */
+#define XCM_REG_STOP_EVNT_ID					 0x200b8
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the STORM interface. */
+#define XCM_REG_STORM_LENGTH_MIS				 0x2021c
+/* [RW 3] The weight of the STORM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_STORM_WEIGHT					 0x200bc
+/* [RW 1] STORM - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_STORM_XCM_IFEN					 0x20010
+/* [RW 4] Timers output initial credit. Max credit available - 15. Write
+   writes the initial credit value; read returns the current value of the
+   credit counter. Must be initialized to 4 at start-up. */
+#define XCM_REG_TM_INIT_CRD					 0x2041c
+/* [RW 3] The weight of the Timers input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TM_WEIGHT					 0x200ec
+/* [RW 28] The CM header for Timers expiration command. */
+#define XCM_REG_TM_XCM_HDR					 0x200a8
+/* [RW 1] Timers - CM Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_TM_XCM_IFEN					 0x2001c
+/* [RW 1] Input tsem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_TSEM_IFEN					 0x20024
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the tsem interface. */
+#define XCM_REG_TSEM_LENGTH_MIS 				 0x20224
+/* [RW 3] The weight of the input tsem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_TSEM_WEIGHT					 0x200c0
+/* [RW 2] The queue index for registration on UNA greater NXT decision rule. */
+#define XCM_REG_UNA_GT_NXT_Q					 0x20120
+/* [RW 1] Input usem Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_USEM_IFEN					 0x2002c
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the usem interface. */
+#define XCM_REG_USEM_LENGTH_MIS 				 0x2022c
+/* [RW 3] The weight of the input usem in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_USEM_WEIGHT					 0x200c8
+#define XCM_REG_WU_DA_CNT_CMD00 				 0x201d4
+#define XCM_REG_WU_DA_CNT_CMD01 				 0x201d8
+#define XCM_REG_WU_DA_CNT_CMD10 				 0x201dc
+#define XCM_REG_WU_DA_CNT_CMD11 				 0x201e0
+#define XCM_REG_WU_DA_CNT_UPD_VAL00				 0x201e4
+#define XCM_REG_WU_DA_CNT_UPD_VAL01				 0x201e8
+#define XCM_REG_WU_DA_CNT_UPD_VAL10				 0x201ec
+#define XCM_REG_WU_DA_CNT_UPD_VAL11				 0x201f0
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00			 0x201c4
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD01			 0x201c8
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD10			 0x201cc
+#define XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD11			 0x201d0
+/* [RW 1] CM - CFC Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_CFC_IFEN					 0x20050
+/* [RW 14] Interrupt mask register #0 read/write */
+#define XCM_REG_XCM_INT_MASK					 0x202b4
+/* [R 14] Interrupt register #0 read */
+#define XCM_REG_XCM_INT_STS					 0x202a8
+/* [RW 30] Parity mask register #0 read/write */
+#define XCM_REG_XCM_PRTY_MASK					 0x202c4
+/* [R 30] Parity register #0 read */
+#define XCM_REG_XCM_PRTY_STS					 0x202b8
+/* [RC 30] Parity register #0 read clear */
+#define XCM_REG_XCM_PRTY_STS_CLR				 0x202bc
+
+/* [RW 4] The size of AG context region 0 in REG-pairs. Designates the MS
+   REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
+   Is used to determine the number of the AG context REG-pairs written back;
+   when the Reg1WbFlg isn't set. */
+#define XCM_REG_XCM_REG0_SZ					 0x200f4
+/* [RW 1] CM - STORM 0 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_STORM0_IFEN 				 0x20004
+/* [RW 1] CM - STORM 1 Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_STORM1_IFEN 				 0x20008
+/* [RW 1] CM - Timers Interface enable. If 0 - the valid input is
+   disregarded; acknowledge output is deasserted; all other signals are
+   treated as usual; if 1 - normal activity. */
+#define XCM_REG_XCM_TM_IFEN					 0x20020
+/* [RW 1] CM - QM Interface enable. If 0 - the acknowledge input is
+   disregarded; valid is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XCM_XQM_IFEN					 0x2000c
+/* [RW 1] If set; the Q index received from the QM is inserted into the
+   event ID. */
+#define XCM_REG_XCM_XQM_USE_Q					 0x200f0
+/* [RW 4] The value by which CFC updates the activity counter at QM bypass. */
+#define XCM_REG_XQM_BYP_ACT_UPD 				 0x200fc
+/* [RW 6] QM output initial credit. Max credit available - 32. Write writes
+   the initial credit value; read returns the current value of the credit
+   counter. Must be initialized to 32 at start-up. */
+#define XCM_REG_XQM_INIT_CRD					 0x20420
+/* [RW 3] The weight of the QM (primary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_P_WEIGHT					 0x200e4
+/* [RW 3] The weight of the QM (secondary) input in the WRR mechanism. 0
+   stands for weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XQM_S_WEIGHT					 0x200e8
+/* [RW 28] The CM header value for QM request (primary). */
+#define XCM_REG_XQM_XCM_HDR_P					 0x200a0
+/* [RW 28] The CM header value for QM request (secondary). */
+#define XCM_REG_XQM_XCM_HDR_S					 0x200a4
+/* [RW 1] QM - CM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XQM_XCM_IFEN					 0x20014
+/* [RW 1] Input SDM Interface enable. If 0 - the valid input is disregarded;
+   acknowledge output is deasserted; all other signals are treated as usual;
+   if 1 - normal activity. */
+#define XCM_REG_XSDM_IFEN					 0x20018
+/* [RC 1] Set at message length mismatch (relative to last indication) at
+   the SDM interface. */
+#define XCM_REG_XSDM_LENGTH_MIS 				 0x20220
+/* [RW 3] The weight of the SDM input in the WRR mechanism. 0 stands for
+   weight 8 (the most prioritised); 1 stands for weight 1 (least
+   prioritised); 2 stands for weight 2; etc. */
+#define XCM_REG_XSDM_WEIGHT					 0x200e0
+/* [RW 17] Indirect access to the descriptor table of the XX protection
+   mechanism. The fields are: [5:0] - message length; [11:6] - message
+   pointer; [16:12] - next pointer. */
+#define XCM_REG_XX_DESCR_TABLE					 0x20480
+#define XCM_REG_XX_DESCR_TABLE_SIZE				 32
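+/* Hypothetical helpers (for illustration only; assume linux/types.h is in
+ * scope) that unpack the three fields of an XX protection descriptor entry
+ * as laid out above:
+ */
+static inline u32 xcm_xx_descr_msg_len(u32 entry)
+{
+	return entry & 0x3f;		/* bits [5:0] - message length */
+}
+
+static inline u32 xcm_xx_descr_msg_ptr(u32 entry)
+{
+	return (entry >> 6) & 0x3f;	/* bits [11:6] - message pointer */
+}
+
+static inline u32 xcm_xx_descr_next_ptr(u32 entry)
+{
+	return (entry >> 12) & 0x1f;	/* bits [16:12] - next pointer */
+}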
+/* [R 6] Used to read the XX protection Free counter. */
+#define XCM_REG_XX_FREE 					 0x20240
+/* [RW 6] Initial value for the credit counter; responsible for filling
+   the Input Stage XX protection buffer with the XX protection pending
+   messages. Max credit available - 3. Write writes the initial credit
+   value; read returns the current value of the credit counter. Must be
+   initialized to 2 at start-up. */
+#define XCM_REG_XX_INIT_CRD					 0x20424
+/* [RW 6] The maximum number of pending messages which may be stored in XX
+   protection. On read; ~xcm_registers_xx_free.xx_free is returned. */
+#define XCM_REG_XX_MSG_NUM					 0x20428
+/* [RW 8] The Event ID; sent to the STORM in case of XX overflow. */
+#define XCM_REG_XX_OVFL_EVNT_ID 				 0x20058
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_LOCAL_FAULT_STATUS	 (0x1<<0)
+#define XMAC_CLEAR_RX_LSS_STATUS_REG_CLEAR_REMOTE_FAULT_STATUS	 (0x1<<1)
+#define XMAC_CTRL_REG_CORE_LOCAL_LPBK				 (0x1<<3)
+#define XMAC_CTRL_REG_RX_EN					 (0x1<<1)
+#define XMAC_CTRL_REG_SOFT_RESET				 (0x1<<6)
+#define XMAC_CTRL_REG_TX_EN					 (0x1<<0)
+#define XMAC_PAUSE_CTRL_REG_RX_PAUSE_EN				 (0x1<<18)
+#define XMAC_PAUSE_CTRL_REG_TX_PAUSE_EN				 (0x1<<17)
+#define XMAC_PFC_CTRL_HI_REG_PFC_REFRESH_EN			 (0x1<<0)
+#define XMAC_PFC_CTRL_HI_REG_PFC_STATS_EN			 (0x1<<3)
+#define XMAC_PFC_CTRL_HI_REG_RX_PFC_EN				 (0x1<<4)
+#define XMAC_PFC_CTRL_HI_REG_TX_PFC_EN				 (0x1<<5)
+#define XMAC_REG_CLEAR_RX_LSS_STATUS				 0x60
+#define XMAC_REG_CTRL						 0
+/* [RW 16] Upper 16 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_HI					 0x2c
+/* [RW 32] Lower 32 bits of the 48-bit ctrl_sa register. Used as the SA in
+ * PAUSE/PFC packets transmitted by the MAC */
+#define XMAC_REG_CTRL_SA_LO					 0x28
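+/* CTRL_SA_HI/CTRL_SA_LO together hold the 48-bit source address used in
+ * PAUSE/PFC frames: the low 32 bits in CTRL_SA_LO and the top 16 bits in
+ * CTRL_SA_HI. A sketch programming mac[0..5], assuming the driver's REG_WR
+ * accessor, an xmac_base of GRCBASE_XMAC0 and network byte order (the
+ * variable names and ordering are hypothetical):
+ *
+ *	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_LO,
+ *	       (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5]);
+ *	REG_WR(bp, xmac_base + XMAC_REG_CTRL_SA_HI,
+ *	       (mac[0] << 8) | mac[1]);
+ */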
+#define XMAC_REG_PAUSE_CTRL					 0x68
+#define XMAC_REG_PFC_CTRL					 0x70
+#define XMAC_REG_PFC_CTRL_HI					 0x74
+#define XMAC_REG_RX_LSS_STATUS					 0x58
+/* [RW 14] Maximum packet size in receive direction; exclusive of preamble &
+ * CRC in strip mode */
+#define XMAC_REG_RX_MAX_SIZE					 0x40
+#define XMAC_REG_TX_CTRL					 0x20
+/* [RW 16] Indirect access to the XX table of the XX protection mechanism.
+   The fields are: [4:0] - tail pointer; [9:5] - Link List size; [14:10] -
+   header pointer. */
+#define XCM_REG_XX_TABLE					 0x20500
+/* [RW 8] The event id for aggregated interrupt 0 */
+#define XSDM_REG_AGG_INT_EVENT_0				 0x166038
+#define XSDM_REG_AGG_INT_EVENT_1				 0x16603c
+#define XSDM_REG_AGG_INT_EVENT_10				 0x166060
+#define XSDM_REG_AGG_INT_EVENT_11				 0x166064
+#define XSDM_REG_AGG_INT_EVENT_12				 0x166068
+#define XSDM_REG_AGG_INT_EVENT_13				 0x16606c
+#define XSDM_REG_AGG_INT_EVENT_14				 0x166070
+#define XSDM_REG_AGG_INT_EVENT_2				 0x166040
+#define XSDM_REG_AGG_INT_EVENT_3				 0x166044
+#define XSDM_REG_AGG_INT_EVENT_4				 0x166048
+#define XSDM_REG_AGG_INT_EVENT_5				 0x16604c
+#define XSDM_REG_AGG_INT_EVENT_6				 0x166050
+#define XSDM_REG_AGG_INT_EVENT_7				 0x166054
+#define XSDM_REG_AGG_INT_EVENT_8				 0x166058
+#define XSDM_REG_AGG_INT_EVENT_9				 0x16605c
+/* [RW 1] For each aggregated interrupt index whether the mode is normal (0)
+   or auto-mask-mode (1) */
+#define XSDM_REG_AGG_INT_MODE_0 				 0x1661b8
+#define XSDM_REG_AGG_INT_MODE_1 				 0x1661bc
+/* [RW 13] The start address in the internal RAM for the cfc_rsp lcid */
+#define XSDM_REG_CFC_RSP_START_ADDR				 0x166008
+/* [RW 16] The maximum value of the completion counter #0 */
+#define XSDM_REG_CMP_COUNTER_MAX0				 0x16601c
+/* [RW 16] The maximum value of the completion counter #1 */
+#define XSDM_REG_CMP_COUNTER_MAX1				 0x166020
+/* [RW 16] The maximum value of the completion counter #2 */
+#define XSDM_REG_CMP_COUNTER_MAX2				 0x166024
+/* [RW 16] The maximum value of the completion counter #3 */
+#define XSDM_REG_CMP_COUNTER_MAX3				 0x166028
+/* [RW 13] The start address in the internal RAM for the completion
+   counters. */
+#define XSDM_REG_CMP_COUNTER_START_ADDR 			 0x16600c
+#define XSDM_REG_ENABLE_IN1					 0x166238
+#define XSDM_REG_ENABLE_IN2					 0x16623c
+#define XSDM_REG_ENABLE_OUT1					 0x166240
+#define XSDM_REG_ENABLE_OUT2					 0x166244
+/* [RW 4] The initial number of messages that can be sent to the pxp control
+   interface without receiving any ACK. */
+#define XSDM_REG_INIT_CREDIT_PXP_CTRL				 0x1664bc
+/* [ST 32] The number of ACK after placement messages received */
+#define XSDM_REG_NUM_OF_ACK_AFTER_PLACE 			 0x16627c
+/* [ST 32] The number of packet end messages received from the parser */
+#define XSDM_REG_NUM_OF_PKT_END_MSG				 0x166274
+/* [ST 32] The number of requests received from the pxp async if */
+#define XSDM_REG_NUM_OF_PXP_ASYNC_REQ				 0x166278
+/* [ST 32] The number of commands received in queue 0 */
+#define XSDM_REG_NUM_OF_Q0_CMD					 0x166248
+/* [ST 32] The number of commands received in queue 10 */
+#define XSDM_REG_NUM_OF_Q10_CMD 				 0x16626c
+/* [ST 32] The number of commands received in queue 11 */
+#define XSDM_REG_NUM_OF_Q11_CMD 				 0x166270
+/* [ST 32] The number of commands received in queue 1 */
+#define XSDM_REG_NUM_OF_Q1_CMD					 0x16624c
+/* [ST 32] The number of commands received in queue 3 */
+#define XSDM_REG_NUM_OF_Q3_CMD					 0x166250
+/* [ST 32] The number of commands received in queue 4 */
+#define XSDM_REG_NUM_OF_Q4_CMD					 0x166254
+/* [ST 32] The number of commands received in queue 5 */
+#define XSDM_REG_NUM_OF_Q5_CMD					 0x166258
+/* [ST 32] The number of commands received in queue 6 */
+#define XSDM_REG_NUM_OF_Q6_CMD					 0x16625c
+/* [ST 32] The number of commands received in queue 7 */
+#define XSDM_REG_NUM_OF_Q7_CMD					 0x166260
+/* [ST 32] The number of commands received in queue 8 */
+#define XSDM_REG_NUM_OF_Q8_CMD					 0x166264
+/* [ST 32] The number of commands received in queue 9 */
+#define XSDM_REG_NUM_OF_Q9_CMD					 0x166268
+/* [RW 13] The start address in the internal RAM for queue counters */
+#define XSDM_REG_Q_COUNTER_START_ADDR				 0x166010
+/* [W 17] Generate an operation after completion; bit-16 is
+ * AggVectIdx_valid; bits 15:8 are AggVectIdx; bits 7:5 are the TRIG and
+ * bits 4:0 are the T124Param[4:0] */
+#define XSDM_REG_OPERATION_GEN					 0x1664c4
+/* [R 1] pxp_ctrl rd_data fifo empty in sdm_dma_rsp block */
+#define XSDM_REG_RSP_PXP_CTRL_RDATA_EMPTY			 0x166548
+/* [R 1] parser fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_PARSER_EMPTY				 0x166550
+/* [R 1] parser serial fifo empty in sdm_sync block */
+#define XSDM_REG_SYNC_SYNC_EMPTY				 0x166558
+/* [RW 32] Tick for timer counter. Applicable only when
+   ~xsdm_registers_timer_tick_enable.timer_tick_enable = 1 */
+#define XSDM_REG_TIMER_TICK					 0x166000
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSDM_REG_XSDM_INT_MASK_0				 0x16629c
+#define XSDM_REG_XSDM_INT_MASK_1				 0x1662ac
+/* [R 32] Interrupt register #0 read */
+#define XSDM_REG_XSDM_INT_STS_0 				 0x166290
+#define XSDM_REG_XSDM_INT_STS_1 				 0x1662a0
+/* [RW 11] Parity mask register #0 read/write */
+#define XSDM_REG_XSDM_PRTY_MASK 				 0x1662bc
+/* [R 11] Parity register #0 read */
+#define XSDM_REG_XSDM_PRTY_STS					 0x1662b0
+/* [RC 11] Parity register #0 read clear */
+#define XSDM_REG_XSDM_PRTY_STS_CLR				 0x1662b4
+/* [RW 5] The number of time_slots in the arbitration cycle */
+#define XSEM_REG_ARB_CYCLE_SIZE 				 0x280034
+/* [RW 3] The source that is associated with arbitration element 0. Source
+   decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2 */
+#define XSEM_REG_ARB_ELEMENT0					 0x280020
+/* [RW 3] The source that is associated with arbitration element 1. Source
+   decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0 */
+#define XSEM_REG_ARB_ELEMENT1					 0x280024
+/* [RW 3] The source that is associated with arbitration element 2. Source
+   decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+   and ~xsem_registers_arb_element1.arb_element1 */
+#define XSEM_REG_ARB_ELEMENT2					 0x280028
+/* [RW 3] The source that is associated with arbitration element 3. Source
+   decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+   and ~xsem_registers_arb_element1.arb_element1 and
+   ~xsem_registers_arb_element2.arb_element2 */
+#define XSEM_REG_ARB_ELEMENT3					 0x28002c
+/* [RW 3] The source that is associated with arbitration element 4. Source
+   decoding is: 0- fic0; 1- fic1; 2- sleeping thread with priority 0; 3-
+   sleeping thread with priority 1; 4- sleeping thread with priority 2.
+   Must not be equal to register ~xsem_registers_arb_element0.arb_element0
+   and ~xsem_registers_arb_element1.arb_element1 and
+   ~xsem_registers_arb_element2.arb_element2 and
+   ~xsem_registers_arb_element3.arb_element3 */
+#define XSEM_REG_ARB_ELEMENT4					 0x280030
+#define XSEM_REG_ENABLE_IN					 0x2800a4
+#define XSEM_REG_ENABLE_OUT					 0x2800a8
+/* [RW 32] This address space contains all registers and memories that are
+   placed in the SEM_FAST block. The SEM_FAST registers are described in
+   appendix B. In order to access the sem_fast registers the base address
+   ~fast_memory.fast_memory should be added to each sem_fast register offset. */
+#define XSEM_REG_FAST_MEMORY					 0x2a0000
+/* [RW 1] Disables input messages from FIC0. May be updated during run_time
+   by the microcode */
+#define XSEM_REG_FIC0_DISABLE					 0x280224
+/* [RW 1] Disables input messages from FIC1. May be updated during run_time
+   by the microcode */
+#define XSEM_REG_FIC1_DISABLE					 0x280234
+/* [RW 15] Interrupt table. Read and write access to it is not possible in
+   the middle of the work */
+#define XSEM_REG_INT_TABLE					 0x280400
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC0 */
+#define XSEM_REG_MSG_NUM_FIC0					 0x280000
+/* [ST 24] Statistics register. The number of messages that entered through
+   FIC1 */
+#define XSEM_REG_MSG_NUM_FIC1					 0x280004
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC0 */
+#define XSEM_REG_MSG_NUM_FOC0					 0x280008
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC1 */
+#define XSEM_REG_MSG_NUM_FOC1					 0x28000c
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC2 */
+#define XSEM_REG_MSG_NUM_FOC2					 0x280010
+/* [ST 24] Statistics register. The number of messages that were sent to
+   FOC3 */
+#define XSEM_REG_MSG_NUM_FOC3					 0x280014
+/* [RW 1] Disables input messages from the passive buffer. May be updated
+   during run_time by the microcode */
+#define XSEM_REG_PAS_DISABLE					 0x28024c
+/* [WB 128] Debug only. Passive buffer memory */
+#define XSEM_REG_PASSIVE_BUFFER 				 0x282000
+/* [WB 46] pram memory. B45 is parity; b[44:0] - data. */
+#define XSEM_REG_PRAM						 0x2c0000
+/* [R 16] Valid sleeping threads indication; one bit per thread */
+#define XSEM_REG_SLEEP_THREADS_VALID				 0x28026c
+/* [R 1] EXT_STORE FIFO is empty in sem_slow_ls_ext */
+#define XSEM_REG_SLOW_EXT_STORE_EMPTY				 0x2802a0
+/* [RW 16] List of free threads. There is a bit per thread. */
+#define XSEM_REG_THREADS_LIST					 0x2802e4
+/* [RW 3] The arbitration scheme of time_slot 0 */
+#define XSEM_REG_TS_0_AS					 0x280038
+/* [RW 3] The arbitration scheme of time_slot 10 */
+#define XSEM_REG_TS_10_AS					 0x280060
+/* [RW 3] The arbitration scheme of time_slot 11 */
+#define XSEM_REG_TS_11_AS					 0x280064
+/* [RW 3] The arbitration scheme of time_slot 12 */
+#define XSEM_REG_TS_12_AS					 0x280068
+/* [RW 3] The arbitration scheme of time_slot 13 */
+#define XSEM_REG_TS_13_AS					 0x28006c
+/* [RW 3] The arbitration scheme of time_slot 14 */
+#define XSEM_REG_TS_14_AS					 0x280070
+/* [RW 3] The arbitration scheme of time_slot 15 */
+#define XSEM_REG_TS_15_AS					 0x280074
+/* [RW 3] The arbitration scheme of time_slot 16 */
+#define XSEM_REG_TS_16_AS					 0x280078
+/* [RW 3] The arbitration scheme of time_slot 17 */
+#define XSEM_REG_TS_17_AS					 0x28007c
+/* [RW 3] The arbitration scheme of time_slot 18 */
+#define XSEM_REG_TS_18_AS					 0x280080
+/* [RW 3] The arbitration scheme of time_slot 1 */
+#define XSEM_REG_TS_1_AS					 0x28003c
+/* [RW 3] The arbitration scheme of time_slot 2 */
+#define XSEM_REG_TS_2_AS					 0x280040
+/* [RW 3] The arbitration scheme of time_slot 3 */
+#define XSEM_REG_TS_3_AS					 0x280044
+/* [RW 3] The arbitration scheme of time_slot 4 */
+#define XSEM_REG_TS_4_AS					 0x280048
+/* [RW 3] The arbitration scheme of time_slot 5 */
+#define XSEM_REG_TS_5_AS					 0x28004c
+/* [RW 3] The arbitration scheme of time_slot 6 */
+#define XSEM_REG_TS_6_AS					 0x280050
+/* [RW 3] The arbitration scheme of time_slot 7 */
+#define XSEM_REG_TS_7_AS					 0x280054
+/* [RW 3] The arbitration scheme of time_slot 8 */
+#define XSEM_REG_TS_8_AS					 0x280058
+/* [RW 3] The arbitration scheme of time_slot 9 */
+#define XSEM_REG_TS_9_AS					 0x28005c
+/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset the error bit
+ * for the 64 VFs; values 64-67 reset it for the 4 PFs; values 68-127 are
+ * not valid. */
+#define XSEM_REG_VFPF_ERR_NUM					 0x280380
+/* [RW 32] Interrupt mask register #0 read/write */
+#define XSEM_REG_XSEM_INT_MASK_0				 0x280110
+#define XSEM_REG_XSEM_INT_MASK_1				 0x280120
+/* [R 32] Interrupt register #0 read */
+#define XSEM_REG_XSEM_INT_STS_0 				 0x280104
+#define XSEM_REG_XSEM_INT_STS_1 				 0x280114
+/* [RW 32] Parity mask register #0 read/write */
+#define XSEM_REG_XSEM_PRTY_MASK_0				 0x280130
+#define XSEM_REG_XSEM_PRTY_MASK_1				 0x280140
+/* [R 32] Parity register #0 read */
+#define XSEM_REG_XSEM_PRTY_STS_0				 0x280124
+#define XSEM_REG_XSEM_PRTY_STS_1				 0x280134
+/* [RC 32] Parity register #0 read clear */
+#define XSEM_REG_XSEM_PRTY_STS_CLR_0				 0x280128
+#define XSEM_REG_XSEM_PRTY_STS_CLR_1				 0x280138
+#define MCPR_NVM_ACCESS_ENABLE_EN				 (1L<<0)
+#define MCPR_NVM_ACCESS_ENABLE_WR_EN				 (1L<<1)
+#define MCPR_NVM_ADDR_NVM_ADDR_VALUE				 (0xffffffL<<0)
+#define MCPR_NVM_CFG4_FLASH_SIZE				 (0x7L<<0)
+#define MCPR_NVM_COMMAND_DOIT					 (1L<<4)
+#define MCPR_NVM_COMMAND_DONE					 (1L<<3)
+#define MCPR_NVM_COMMAND_FIRST					 (1L<<7)
+#define MCPR_NVM_COMMAND_LAST					 (1L<<8)
+#define MCPR_NVM_COMMAND_WR					 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_ARB1				 (1L<<9)
+#define MCPR_NVM_SW_ARB_ARB_REQ_CLR1				 (1L<<5)
+#define MCPR_NVM_SW_ARB_ARB_REQ_SET1				 (1L<<1)
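+/* A single NVRAM dword access built from the MCPR_NVM_COMMAND bits above:
+ * FIRST and LAST bracket the (here one-dword) burst, WR selects a write
+ * over a read, DOIT starts the access and DONE is polled for completion.
+ * A sketch, assuming the driver's REG_RD/REG_WR accessors and the MCP's
+ * command/read-data register offsets (nvm_command and nvm_read are
+ * hypothetical names):
+ *
+ *	cmd = MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST |
+ *	      MCPR_NVM_COMMAND_DOIT;
+ *	REG_WR(bp, nvm_command, cmd);
+ *	while (!(REG_RD(bp, nvm_command) & MCPR_NVM_COMMAND_DONE))
+ *		udelay(5);
+ *	val = REG_RD(bp, nvm_read);
+ */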
+#define BIGMAC_REGISTER_BMAC_CONTROL				 (0x00<<3)
+#define BIGMAC_REGISTER_BMAC_XGXS_CONTROL			 (0x01<<3)
+#define BIGMAC_REGISTER_CNT_MAX_SIZE				 (0x05<<3)
+#define BIGMAC_REGISTER_RX_CONTROL				 (0x21<<3)
+#define BIGMAC_REGISTER_RX_LLFC_MSG_FLDS			 (0x46<<3)
+#define BIGMAC_REGISTER_RX_LSS_STATUS				 (0x43<<3)
+#define BIGMAC_REGISTER_RX_MAX_SIZE				 (0x23<<3)
+#define BIGMAC_REGISTER_RX_STAT_GR64				 (0x26<<3)
+#define BIGMAC_REGISTER_RX_STAT_GRIPJ				 (0x42<<3)
+#define BIGMAC_REGISTER_TX_CONTROL				 (0x07<<3)
+#define BIGMAC_REGISTER_TX_MAX_SIZE				 (0x09<<3)
+#define BIGMAC_REGISTER_TX_PAUSE_THRESHOLD			 (0x0A<<3)
+#define BIGMAC_REGISTER_TX_SOURCE_ADDR				 (0x08<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTBYT				 (0x20<<3)
+#define BIGMAC_REGISTER_TX_STAT_GTPKT				 (0x0C<<3)
+#define BIGMAC2_REGISTER_BMAC_CONTROL				 (0x00<<3)
+#define BIGMAC2_REGISTER_BMAC_XGXS_CONTROL			 (0x01<<3)
+#define BIGMAC2_REGISTER_CNT_MAX_SIZE				 (0x05<<3)
+#define BIGMAC2_REGISTER_PFC_CONTROL				 (0x06<<3)
+#define BIGMAC2_REGISTER_RX_CONTROL				 (0x3A<<3)
+#define BIGMAC2_REGISTER_RX_LLFC_MSG_FLDS			 (0x62<<3)
+#define BIGMAC2_REGISTER_RX_LSS_STAT				 (0x3E<<3)
+#define BIGMAC2_REGISTER_RX_MAX_SIZE				 (0x3C<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GR64				 (0x40<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRIPJ				 (0x5f<<3)
+#define BIGMAC2_REGISTER_RX_STAT_GRPP				 (0x51<<3)
+#define BIGMAC2_REGISTER_TX_CONTROL				 (0x1C<<3)
+#define BIGMAC2_REGISTER_TX_MAX_SIZE				 (0x1E<<3)
+#define BIGMAC2_REGISTER_TX_PAUSE_CONTROL			 (0x20<<3)
+#define BIGMAC2_REGISTER_TX_SOURCE_ADDR			 (0x1D<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTBYT				 (0x39<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPOK				 (0x22<<3)
+#define BIGMAC2_REGISTER_TX_STAT_GTPP				 (0x24<<3)
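+/* The BIGMAC and BIGMAC2 offsets above are register indices shifted left
+ * by 3, i.e. an 8-byte register stride: register index N lives at byte
+ * offset N << 3 (for example BIGMAC_REGISTER_RX_CONTROL = 0x21 << 3 =
+ * 0x108). */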
+#define EMAC_LED_1000MB_OVERRIDE				 (1L<<1)
+#define EMAC_LED_100MB_OVERRIDE 				 (1L<<2)
+#define EMAC_LED_10MB_OVERRIDE					 (1L<<3)
+#define EMAC_LED_2500MB_OVERRIDE				 (1L<<12)
+#define EMAC_LED_OVERRIDE					 (1L<<0)
+#define EMAC_LED_TRAFFIC					 (1L<<6)
+#define EMAC_MDIO_COMM_COMMAND_ADDRESS				 (0L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_22				 (2L<<26)
+#define EMAC_MDIO_COMM_COMMAND_READ_45				 (3L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_22				 (1L<<26)
+#define EMAC_MDIO_COMM_COMMAND_WRITE_45 			 (1L<<26)
+#define EMAC_MDIO_COMM_DATA					 (0xffffL<<0)
+#define EMAC_MDIO_COMM_START_BUSY				 (1L<<29)
+#define EMAC_MDIO_MODE_AUTO_POLL				 (1L<<4)
+#define EMAC_MDIO_MODE_CLAUSE_45				 (1L<<31)
+#define EMAC_MDIO_MODE_CLOCK_CNT				 (0x3ffL<<16)
+#define EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT			 16
+#define EMAC_MDIO_STATUS_10MB					 (1L<<1)
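+/* A clause 45 MDIO read is a two-step sequence on EMAC_REG_EMAC_MDIO_COMM
+ * (with EMAC_MDIO_MODE_CLAUSE_45 set in EMAC_REG_EMAC_MDIO_MODE): an
+ * ADDRESS cycle carrying the register address, then a READ_45 cycle; each
+ * step is started by setting EMAC_MDIO_COMM_START_BUSY and has completed
+ * once hardware clears that bit, leaving the result in the
+ * EMAC_MDIO_COMM_DATA field. A sketch, assuming the driver's REG_RD/REG_WR
+ * accessors and an emac_base of GRCBASE_EMAC0 (variable names
+ * hypothetical):
+ *
+ *	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM,
+ *	       EMAC_MDIO_COMM_COMMAND_ADDRESS | reg_addr |
+ *	       EMAC_MDIO_COMM_START_BUSY);
+ *	... poll until EMAC_MDIO_COMM_START_BUSY clears ...
+ *	REG_WR(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM,
+ *	       EMAC_MDIO_COMM_COMMAND_READ_45 | EMAC_MDIO_COMM_START_BUSY);
+ *	... poll again, then ...
+ *	val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM) &
+ *	      EMAC_MDIO_COMM_DATA;
+ */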
+#define EMAC_MODE_25G_MODE					 (1L<<5)
+#define EMAC_MODE_HALF_DUPLEX					 (1L<<1)
+#define EMAC_MODE_PORT_GMII					 (2L<<2)
+#define EMAC_MODE_PORT_MII					 (1L<<2)
+#define EMAC_MODE_PORT_MII_10M					 (3L<<2)
+#define EMAC_MODE_RESET 					 (1L<<0)
+#define EMAC_REG_EMAC_LED					 0xc
+#define EMAC_REG_EMAC_MAC_MATCH 				 0x10
+#define EMAC_REG_EMAC_MDIO_COMM 				 0xac
+#define EMAC_REG_EMAC_MDIO_MODE 				 0xb4
+#define EMAC_REG_EMAC_MDIO_STATUS				 0xb0
+#define EMAC_REG_EMAC_MODE					 0x0
+#define EMAC_REG_EMAC_RX_MODE					 0xc8
+#define EMAC_REG_EMAC_RX_MTU_SIZE				 0x9c
+#define EMAC_REG_EMAC_RX_STAT_AC				 0x180
+#define EMAC_REG_EMAC_RX_STAT_AC_28				 0x1f4
+#define EMAC_REG_EMAC_RX_STAT_AC_COUNT				 23
+#define EMAC_REG_EMAC_TX_MODE					 0xbc
+#define EMAC_REG_EMAC_TX_STAT_AC				 0x280
+#define EMAC_REG_EMAC_TX_STAT_AC_COUNT				 22
+#define EMAC_REG_RX_PFC_MODE					 0x320
+#define EMAC_REG_RX_PFC_MODE_PRIORITIES				 (1L<<2)
+#define EMAC_REG_RX_PFC_MODE_RX_EN				 (1L<<1)
+#define EMAC_REG_RX_PFC_MODE_TX_EN				 (1L<<0)
+#define EMAC_REG_RX_PFC_PARAM					 0x324
+#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT			 0
+#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT		 16
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD				 0x328
+#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT			 (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT				 0x330
+#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT			 (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD				 0x32c
+#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT			 (0xffff<<0)
+#define EMAC_REG_RX_PFC_STATS_XON_SENT				 0x334
+#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT			 (0xffff<<0)
+#define EMAC_RX_MODE_FLOW_EN					 (1L<<2)
+#define EMAC_RX_MODE_KEEP_MAC_CONTROL				 (1L<<3)
+#define EMAC_RX_MODE_KEEP_VLAN_TAG				 (1L<<10)
+#define EMAC_RX_MODE_PROMISCUOUS				 (1L<<8)
+#define EMAC_RX_MODE_RESET					 (1L<<0)
+#define EMAC_RX_MTU_SIZE_JUMBO_ENA				 (1L<<31)
+#define EMAC_TX_MODE_EXT_PAUSE_EN				 (1L<<3)
+#define EMAC_TX_MODE_FLOW_EN					 (1L<<4)
+#define EMAC_TX_MODE_RESET					 (1L<<0)
+#define MISC_REGISTERS_GPIO_0					 0
+#define MISC_REGISTERS_GPIO_1					 1
+#define MISC_REGISTERS_GPIO_2					 2
+#define MISC_REGISTERS_GPIO_3					 3
+#define MISC_REGISTERS_GPIO_CLR_POS				 16
+#define MISC_REGISTERS_GPIO_FLOAT				 (0xffL<<24)
+#define MISC_REGISTERS_GPIO_FLOAT_POS				 24
+#define MISC_REGISTERS_GPIO_HIGH				 1
+#define MISC_REGISTERS_GPIO_INPUT_HI_Z				 2
+#define MISC_REGISTERS_GPIO_INT_CLR_POS 			 24
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_CLR			 0
+#define MISC_REGISTERS_GPIO_INT_OUTPUT_SET			 1
+#define MISC_REGISTERS_GPIO_INT_SET_POS 			 16
+#define MISC_REGISTERS_GPIO_LOW 				 0
+#define MISC_REGISTERS_GPIO_OUTPUT_HIGH 			 1
+#define MISC_REGISTERS_GPIO_OUTPUT_LOW				 0
+#define MISC_REGISTERS_GPIO_PORT_SHIFT				 4
+#define MISC_REGISTERS_GPIO_SET_POS				 8
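+/* The MISC GPIO register is written through position fields: a bit at
+ * MISC_REGISTERS_GPIO_SET_POS drives the pin high, one at
+ * MISC_REGISTERS_GPIO_CLR_POS drives it low and one at
+ * MISC_REGISTERS_GPIO_FLOAT_POS tristates it; port 1 pins sit
+ * MISC_REGISTERS_GPIO_PORT_SHIFT above port 0 pins. A hypothetical helper
+ * composing the "drive high" value for one pin:
+ */
+static inline u32 misc_gpio_set_high_val(u32 gpio_num, u32 port)
+{
+	u32 pin = gpio_num + (port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
+
+	return 1U << (pin + MISC_REGISTERS_GPIO_SET_POS);
+}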
+#define MISC_REGISTERS_RESET_REG_1_CLEAR			 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_HC			 (0x1<<29)
+#define MISC_REGISTERS_RESET_REG_1_RST_NIG			 (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXP			 (0x1<<26)
+#define MISC_REGISTERS_RESET_REG_1_RST_PXPV			 (0x1<<27)
+#define MISC_REGISTERS_RESET_REG_1_SET				 0x584
+#define MISC_REGISTERS_RESET_REG_2_CLEAR			 0x598
+#define MISC_REGISTERS_RESET_REG_2_MSTAT0			 (0x1<<24)
+#define MISC_REGISTERS_RESET_REG_2_MSTAT1			 (0x1<<25)
+#define MISC_REGISTERS_RESET_REG_2_PGLC				 (0x1<<19)
+#define MISC_REGISTERS_RESET_REG_2_RST_ATC			 (0x1<<17)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0			 (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_2_RST_BMAC1			 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0			 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE		 (0x1<<14)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1			 (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE		 (0x1<<15)
+#define MISC_REGISTERS_RESET_REG_2_RST_GRC			 (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B	 (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE	 (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU	 (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_2_RST_MDIO			 (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE		 (0x1<<11)
+#define MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO			 (0x1<<13)
+#define MISC_REGISTERS_RESET_REG_2_RST_RBCN			 (0x1<<9)
+#define MISC_REGISTERS_RESET_REG_2_SET				 0x594
+#define MISC_REGISTERS_RESET_REG_2_UMAC0			 (0x1<<20)
+#define MISC_REGISTERS_RESET_REG_2_UMAC1			 (0x1<<21)
+#define MISC_REGISTERS_RESET_REG_2_XMAC				 (0x1<<22)
+#define MISC_REGISTERS_RESET_REG_2_XMAC_SOFT			 (0x1<<23)
+#define MISC_REGISTERS_RESET_REG_3_CLEAR			 0x5a8
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ	 (0x1<<1)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN	 (0x1<<2)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_PWRDWN_SD (0x1<<3)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_RSTB_HW  (0x1<<0)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_IDDQ	 (0x1<<5)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN	 (0x1<<6)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_PWRDWN_SD  (0x1<<7)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_RSTB_HW	 (0x1<<4)
+#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_XGXS0_TXD_FIFO_RSTB (0x1<<8)
+#define MISC_REGISTERS_RESET_REG_3_SET				 0x5a4
+#define MISC_REGISTERS_SPIO_4					 4
+#define MISC_REGISTERS_SPIO_5					 5
+#define MISC_REGISTERS_SPIO_7					 7
+#define MISC_REGISTERS_SPIO_CLR_POS				 16
+#define MISC_REGISTERS_SPIO_FLOAT				 (0xffL<<24)
+#define MISC_REGISTERS_SPIO_FLOAT_POS				 24
+#define MISC_REGISTERS_SPIO_INPUT_HI_Z				 2
+#define MISC_REGISTERS_SPIO_INT_OLD_SET_POS			 16
+#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 			 1
+#define MISC_REGISTERS_SPIO_OUTPUT_LOW				 0
+#define MISC_REGISTERS_SPIO_SET_POS				 8
+#define HW_LOCK_DRV_FLAGS					 10
+#define HW_LOCK_MAX_RESOURCE_VALUE				 31
+#define HW_LOCK_RESOURCE_GPIO					 1
+#define HW_LOCK_RESOURCE_MDIO					 0
+#define HW_LOCK_RESOURCE_PORT0_ATT_MASK				 3
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0			 8
+#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1			 9
+#define HW_LOCK_RESOURCE_SPIO					 2
+#define HW_LOCK_RESOURCE_UNDI					 5
+#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT			 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR			 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR			 (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_CCM_HW_INTERRUPT			 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR			 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_CDU_HW_INTERRUPT			 (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR			 (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT			 (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR			 (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_CSDM_HW_INTERRUPT			 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR			 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_HW_INTERRUPT			 (0x1<<1)
+#define AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR			 (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR			 (0x1<<18)
+#define AEU_INPUTS_ATTN_BITS_DMAE_HW_INTERRUPT			 (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR			 (0x1<<10)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT		 (0x1<<13)
+#define AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR		 (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_GPIO0_FUNCTION_0			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR			 (0x1<<12)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY		 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY		 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY		 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY		 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT			 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR			 (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR			 (0x1<<14)
+#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR		 (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_HW_INTERRUPT		 (0x1<<31)
+#define AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR		 (0x1<<30)
+#define AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR			 (0x1<<0)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_HW_INTERRUPT	 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR	 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_QM_HW_INTERRUPT			 (0x1<<3)
+#define AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR			 (0x1<<2)
+#define AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR		 (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_SPIO5				 (0x1<<15)
+#define AEU_INPUTS_ATTN_BITS_TCM_HW_INTERRUPT			 (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR			 (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_HW_INTERRUPT		 (0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR		 (0x1<<4)
+#define AEU_INPUTS_ATTN_BITS_TSDM_HW_INTERRUPT			 (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR			 (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_HW_INTERRUPT			 (0x1<<29)
+#define AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR			 (0x1<<28)
+#define AEU_INPUTS_ATTN_BITS_UCM_HW_INTERRUPT			 (0x1<<23)
+#define AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR			 (0x1<<22)
+#define AEU_INPUTS_ATTN_BITS_UPB_HW_INTERRUPT			 (0x1<<27)
+#define AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR			 (0x1<<26)
+#define AEU_INPUTS_ATTN_BITS_USDM_HW_INTERRUPT			 (0x1<<21)
+#define AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR			 (0x1<<20)
+#define AEU_INPUTS_ATTN_BITS_USEMI_HW_INTERRUPT			 (0x1<<25)
+#define AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR			 (0x1<<24)
+#define AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR		 (0x1<<16)
+#define AEU_INPUTS_ATTN_BITS_XCM_HW_INTERRUPT			 (0x1<<9)
+#define AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR			 (0x1<<8)
+#define AEU_INPUTS_ATTN_BITS_XSDM_HW_INTERRUPT			 (0x1<<7)
+#define AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR			 (0x1<<6)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_HW_INTERRUPT			 (0x1<<11)
+#define AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR			 (0x1<<10)
+
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0			(0x1<<5)
+#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1			(0x1<<9)
+
+#define RESERVED_GENERAL_ATTENTION_BIT_0	0
+
+#define EVEREST_GEN_ATTN_IN_USE_MASK		0x7ffe0
+#define EVEREST_LATCHED_ATTN_IN_USE_MASK	0xffe00000
+
+#define RESERVED_GENERAL_ATTENTION_BIT_6	6
+#define RESERVED_GENERAL_ATTENTION_BIT_7	7
+#define RESERVED_GENERAL_ATTENTION_BIT_8	8
+#define RESERVED_GENERAL_ATTENTION_BIT_9	9
+#define RESERVED_GENERAL_ATTENTION_BIT_10	10
+#define RESERVED_GENERAL_ATTENTION_BIT_11	11
+#define RESERVED_GENERAL_ATTENTION_BIT_12	12
+#define RESERVED_GENERAL_ATTENTION_BIT_13	13
+#define RESERVED_GENERAL_ATTENTION_BIT_14	14
+#define RESERVED_GENERAL_ATTENTION_BIT_15	15
+#define RESERVED_GENERAL_ATTENTION_BIT_16	16
+#define RESERVED_GENERAL_ATTENTION_BIT_17	17
+#define RESERVED_GENERAL_ATTENTION_BIT_18	18
+#define RESERVED_GENERAL_ATTENTION_BIT_19	19
+#define RESERVED_GENERAL_ATTENTION_BIT_20	20
+#define RESERVED_GENERAL_ATTENTION_BIT_21	21
+
+/* storm asserts attention bits */
+#define TSTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_7
+#define USTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_8
+#define CSTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_9
+#define XSTORM_FATAL_ASSERT_ATTENTION_BIT     RESERVED_GENERAL_ATTENTION_BIT_10
+
+/* mcp error attention bit */
+#define MCP_FATAL_ASSERT_ATTENTION_BIT	      RESERVED_GENERAL_ATTENTION_BIT_11
+
+/*E1H NIG status sync attention mapped to group 4-7*/
+#define LINK_SYNC_ATTENTION_BIT_FUNC_0	    RESERVED_GENERAL_ATTENTION_BIT_12
+#define LINK_SYNC_ATTENTION_BIT_FUNC_1	    RESERVED_GENERAL_ATTENTION_BIT_13
+#define LINK_SYNC_ATTENTION_BIT_FUNC_2	    RESERVED_GENERAL_ATTENTION_BIT_14
+#define LINK_SYNC_ATTENTION_BIT_FUNC_3	    RESERVED_GENERAL_ATTENTION_BIT_15
+#define LINK_SYNC_ATTENTION_BIT_FUNC_4	    RESERVED_GENERAL_ATTENTION_BIT_16
+#define LINK_SYNC_ATTENTION_BIT_FUNC_5	    RESERVED_GENERAL_ATTENTION_BIT_17
+#define LINK_SYNC_ATTENTION_BIT_FUNC_6	    RESERVED_GENERAL_ATTENTION_BIT_18
+#define LINK_SYNC_ATTENTION_BIT_FUNC_7	    RESERVED_GENERAL_ATTENTION_BIT_19
+
+
+#define LATCHED_ATTN_RBCR			23
+#define LATCHED_ATTN_RBCT			24
+#define LATCHED_ATTN_RBCN			25
+#define LATCHED_ATTN_RBCU			26
+#define LATCHED_ATTN_RBCP			27
+#define LATCHED_ATTN_TIMEOUT_GRC		28
+#define LATCHED_ATTN_RSVD_GRC			29
+#define LATCHED_ATTN_ROM_PARITY_MCP		30
+#define LATCHED_ATTN_UM_RX_PARITY_MCP		31
+#define LATCHED_ATTN_UM_TX_PARITY_MCP		32
+#define LATCHED_ATTN_SCPAD_PARITY_MCP		33
+
+#define GENERAL_ATTEN_WORD(atten_name)	       ((94 + atten_name) / 32)
+#define GENERAL_ATTEN_OFFSET(atten_name)\
+	(1UL << ((94 + atten_name) % 32))
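+/* GENERAL_ATTEN_WORD/GENERAL_ATTEN_OFFSET translate a general attention
+ * number into the AEU word index and the bit mask within that word; the
+ * general attentions start 94 bits into the AEU input space. For example
+ * LINK_SYNC_ATTENTION_BIT_FUNC_0 (12) maps to word (94 + 12) / 32 = 3,
+ * mask 1UL << ((94 + 12) % 32) = 1UL << 10. */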
+/*
+ * This file defines GRC base address for every block.
+ * This file is included by chipsim, asm microcode and cpp microcode.
+ * These values are used in Design.xml in the regBase attribute.
+ * Use the base with the generated offsets of specific registers.
+ */
+
+#define GRCBASE_PXPCS		0x000000
+#define GRCBASE_PCICONFIG	0x002000
+#define GRCBASE_PCIREG		0x002400
+#define GRCBASE_EMAC0		0x008000
+#define GRCBASE_EMAC1		0x008400
+#define GRCBASE_DBU		0x008800
+#define GRCBASE_MISC		0x00A000
+#define GRCBASE_DBG		0x00C000
+#define GRCBASE_NIG		0x010000
+#define GRCBASE_XCM		0x020000
+#define GRCBASE_PRS		0x040000
+#define GRCBASE_SRCH		0x040400
+#define GRCBASE_TSDM		0x042000
+#define GRCBASE_TCM		0x050000
+#define GRCBASE_BRB1		0x060000
+#define GRCBASE_MCP		0x080000
+#define GRCBASE_UPB		0x0C1000
+#define GRCBASE_CSDM		0x0C2000
+#define GRCBASE_USDM		0x0C4000
+#define GRCBASE_CCM		0x0D0000
+#define GRCBASE_UCM		0x0E0000
+#define GRCBASE_CDU		0x101000
+#define GRCBASE_DMAE		0x102000
+#define GRCBASE_PXP		0x103000
+#define GRCBASE_CFC		0x104000
+#define GRCBASE_HC		0x108000
+#define GRCBASE_PXP2		0x120000
+#define GRCBASE_PBF		0x140000
+#define GRCBASE_UMAC0		0x160000
+#define GRCBASE_UMAC1		0x160400
+#define GRCBASE_XPB		0x161000
+#define GRCBASE_MSTAT0	    0x162000
+#define GRCBASE_MSTAT1	    0x162800
+#define GRCBASE_XMAC0		0x163000
+#define GRCBASE_XMAC1		0x163800
+#define GRCBASE_TIMERS		0x164000
+#define GRCBASE_XSDM		0x166000
+#define GRCBASE_QM		0x168000
+#define GRCBASE_DQ		0x170000
+#define GRCBASE_TSEM		0x180000
+#define GRCBASE_CSEM		0x200000
+#define GRCBASE_XSEM		0x280000
+#define GRCBASE_USEM		0x300000
+#define GRCBASE_MISC_AEU	GRCBASE_MISC
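+/* A register's full GRC address is its block base plus the
+ * register-specific offset; the offsets defined earlier in this file
+ * already have the base folded in. For example XSDM_REG_TIMER_TICK
+ * (0x166000) is GRCBASE_XSDM + 0x0 and XSEM_REG_ARB_CYCLE_SIZE (0x280034)
+ * is GRCBASE_XSEM + 0x34. */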
+
+
+/* offset of configuration space in the pci core register */
+#define PCICFG_OFFSET					0x2000
+#define PCICFG_VENDOR_ID_OFFSET 			0x00
+#define PCICFG_DEVICE_ID_OFFSET 			0x02
+#define PCICFG_COMMAND_OFFSET				0x04
+#define PCICFG_COMMAND_IO_SPACE 		(1<<0)
+#define PCICFG_COMMAND_MEM_SPACE		(1<<1)
+#define PCICFG_COMMAND_BUS_MASTER		(1<<2)
+#define PCICFG_COMMAND_SPECIAL_CYCLES		(1<<3)
+#define PCICFG_COMMAND_MWI_CYCLES		(1<<4)
+#define PCICFG_COMMAND_VGA_SNOOP		(1<<5)
+#define PCICFG_COMMAND_PERR_ENA 		(1<<6)
+#define PCICFG_COMMAND_STEPPING 		(1<<7)
+#define PCICFG_COMMAND_SERR_ENA 		(1<<8)
+#define PCICFG_COMMAND_FAST_B2B 		(1<<9)
+#define PCICFG_COMMAND_INT_DISABLE		(1<<10)
+#define PCICFG_COMMAND_RESERVED 		(0x1f<<11)
+#define PCICFG_STATUS_OFFSET				0x06
+#define PCICFG_REVESION_ID_OFFSET			0x08
+#define PCICFG_CACHE_LINE_SIZE				0x0c
+#define PCICFG_LATENCY_TIMER				0x0d
+#define PCICFG_BAR_1_LOW				0x10
+#define PCICFG_BAR_1_HIGH				0x14
+#define PCICFG_BAR_2_LOW				0x18
+#define PCICFG_BAR_2_HIGH				0x1c
+#define PCICFG_SUBSYSTEM_VENDOR_ID_OFFSET		0x2c
+#define PCICFG_SUBSYSTEM_ID_OFFSET			0x2e
+#define PCICFG_INT_LINE 				0x3c
+#define PCICFG_INT_PIN					0x3d
+#define PCICFG_PM_CAPABILITY				0x48
+#define PCICFG_PM_CAPABILITY_VERSION		(0x3<<16)
+#define PCICFG_PM_CAPABILITY_CLOCK		(1<<19)
+#define PCICFG_PM_CAPABILITY_RESERVED		(1<<20)
+#define PCICFG_PM_CAPABILITY_DSI		(1<<21)
+#define PCICFG_PM_CAPABILITY_AUX_CURRENT	(0x7<<22)
+#define PCICFG_PM_CAPABILITY_D1_SUPPORT 	(1<<25)
+#define PCICFG_PM_CAPABILITY_D2_SUPPORT 	(1<<26)
+#define PCICFG_PM_CAPABILITY_PME_IN_D0		(1<<27)
+#define PCICFG_PM_CAPABILITY_PME_IN_D1		(1<<28)
+#define PCICFG_PM_CAPABILITY_PME_IN_D2		(1<<29)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_HOT	(1<<30)
+#define PCICFG_PM_CAPABILITY_PME_IN_D3_COLD	(1<<31)
+#define PCICFG_PM_CSR_OFFSET				0x4c
+#define PCICFG_PM_CSR_STATE			(0x3<<0)
+#define PCICFG_PM_CSR_PME_ENABLE		(1<<8)
+#define PCICFG_PM_CSR_PME_STATUS		(1<<15)
+#define PCICFG_MSI_CAP_ID_OFFSET			0x58
+#define PCICFG_MSI_CONTROL_ENABLE		(0x1<<16)
+#define PCICFG_MSI_CONTROL_MCAP 		(0x7<<17)
+#define PCICFG_MSI_CONTROL_MENA 		(0x7<<20)
+#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP	(0x1<<23)
+#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE	(0x1<<24)
+#define PCICFG_GRC_ADDRESS				0x78
+#define PCICFG_GRC_DATA 				0x80
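+/* PCICFG_GRC_ADDRESS and PCICFG_GRC_DATA form an indirect window into GRC
+ * space through PCI configuration cycles: write the target GRC address to
+ * the address register, then read or write the data register. A sketch
+ * using the standard PCI config accessors (pdev being the device's
+ * struct pci_dev; error handling omitted):
+ *
+ *	pci_write_config_dword(pdev, PCICFG_GRC_ADDRESS, grc_addr);
+ *	pci_read_config_dword(pdev, PCICFG_GRC_DATA, &val);
+ */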
+#define PCICFG_MSIX_CAP_ID_OFFSET			0xa0
+#define PCICFG_MSIX_CONTROL_TABLE_SIZE		(0x7ff<<16)
+#define PCICFG_MSIX_CONTROL_RESERVED		(0x7<<27)
+#define PCICFG_MSIX_CONTROL_FUNC_MASK		(0x1<<30)
+#define PCICFG_MSIX_CONTROL_MSIX_ENABLE 	(0x1<<31)
+
+#define PCICFG_DEVICE_CONTROL				0xb4
+#define PCICFG_DEVICE_STATUS				0xb6
+#define PCICFG_DEVICE_STATUS_CORR_ERR_DET	(1<<0)
+#define PCICFG_DEVICE_STATUS_NON_FATAL_ERR_DET	(1<<1)
+#define PCICFG_DEVICE_STATUS_FATAL_ERR_DET	(1<<2)
+#define PCICFG_DEVICE_STATUS_UNSUP_REQ_DET	(1<<3)
+#define PCICFG_DEVICE_STATUS_AUX_PWR_DET	(1<<4)
+#define PCICFG_DEVICE_STATUS_NO_PEND		(1<<5)
+#define PCICFG_LINK_CONTROL				0xbc
+
+
+#define BAR_USTRORM_INTMEM				0x400000
+#define BAR_CSTRORM_INTMEM				0x410000
+#define BAR_XSTRORM_INTMEM				0x420000
+#define BAR_TSTRORM_INTMEM				0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM					0x440000
+
+#define BAR_DOORBELL_OFFSET				0x800000
+
+/* config_2 offset */
+#define GRC_CONFIG_2_SIZE_REG				0x408
+#define PCI_CONFIG_2_BAR1_SIZE			(0xfL<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_DISABLED 	(0L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64K		(1L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128K		(2L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256K		(3L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512K		(4L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1M		(5L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_2M		(6L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_4M		(7L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_8M		(8L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_16M		(9L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_32M		(10L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_64M		(11L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_128M		(12L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_256M		(13L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_512M		(14L<<0)
+#define PCI_CONFIG_2_BAR1_SIZE_1G		(15L<<0)
+#define PCI_CONFIG_2_BAR1_64ENA 		(1L<<4)
+#define PCI_CONFIG_2_EXP_ROM_RETRY		(1L<<5)
+#define PCI_CONFIG_2_CFG_CYCLE_RETRY		(1L<<6)
+#define PCI_CONFIG_2_FIRST_CFG_DONE		(1L<<7)
+#define PCI_CONFIG_2_EXP_ROM_SIZE		(0xffL<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_DISABLED	(0L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2K		(1L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4K		(2L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8K		(3L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16K		(4L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32K		(5L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_64K		(6L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_128K		(7L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_256K		(8L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_512K		(9L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_1M		(10L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_2M		(11L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_4M		(12L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_8M		(13L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_16M		(14L<<8)
+#define PCI_CONFIG_2_EXP_ROM_SIZE_32M		(15L<<8)
+#define PCI_CONFIG_2_BAR_PREFETCH		(1L<<16)
+#define PCI_CONFIG_2_RESERVED0			(0x7fffL<<17)
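+/* The BAR1 size field is a 4-bit log encoding: 0 disables the BAR and a
+ * non-zero value n selects 32K << n bytes (1 -> 64K, 2 -> 128K, ...,
+ * 15 -> 1G). A hypothetical decode helper returning the size in KB:
+ */
+static inline u32 grc_bar1_size_kb(u32 config_2)
+{
+	u32 enc = config_2 & PCI_CONFIG_2_BAR1_SIZE;
+
+	return enc ? 32U << enc : 0;	/* 0 means the BAR is disabled */
+}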
+
+/* config_3 offset */
+#define GRC_CONFIG_3_SIZE_REG				0x40c
+#define PCI_CONFIG_3_STICKY_BYTE		(0xffL<<0)
+#define PCI_CONFIG_3_FORCE_PME			(1L<<24)
+#define PCI_CONFIG_3_PME_STATUS 		(1L<<25)
+#define PCI_CONFIG_3_PME_ENABLE 		(1L<<26)
+#define PCI_CONFIG_3_PM_STATE			(0x3L<<27)
+#define PCI_CONFIG_3_VAUX_PRESET		(1L<<30)
+#define PCI_CONFIG_3_PCI_POWER			(1L<<31)
+
+#define GRC_BAR2_CONFIG 				0x4e0
+#define PCI_CONFIG_2_BAR2_SIZE			(0xfL<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_DISABLED 	(0L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64K		(1L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128K		(2L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256K		(3L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512K		(4L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1M		(5L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_2M		(6L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_4M		(7L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_8M		(8L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_16M		(9L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_32M		(10L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_64M		(11L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_128M		(12L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_256M		(13L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_512M		(14L<<0)
+#define PCI_CONFIG_2_BAR2_SIZE_1G		(15L<<0)
+#define PCI_CONFIG_2_BAR2_64ENA 		(1L<<4)
+
+#define PCI_PM_DATA_A					0x410
+#define PCI_PM_DATA_B					0x414
+#define PCI_ID_VAL1					0x434
+#define PCI_ID_VAL2					0x438
+
+#define PXPCS_TL_CONTROL_5		    0x814
+#define PXPCS_TL_CONTROL_5_UNKNOWNTYPE_ERR_ATTN    (1 << 29) /*WC*/
+#define PXPCS_TL_CONTROL_5_BOUNDARY4K_ERR_ATTN	   (1 << 28)   /*WC*/
+#define PXPCS_TL_CONTROL_5_MRRS_ERR_ATTN   (1 << 27)   /*WC*/
+#define PXPCS_TL_CONTROL_5_MPS_ERR_ATTN    (1 << 26)   /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_BRIDGE_FORWARD_ERR  (1 << 25)   /*WC*/
+#define PXPCS_TL_CONTROL_5_TTX_TXINTF_OVERFLOW	   (1 << 24)   /*WC*/
+#define PXPCS_TL_CONTROL_5_PHY_ERR_ATTN    (1 << 23)   /*RO*/
+#define PXPCS_TL_CONTROL_5_DL_ERR_ATTN	   (1 << 22)   /*RO*/
+#define PXPCS_TL_CONTROL_5_TTX_ERR_NP_TAG_IN_USE   (1 << 21)   /*WC*/
+#define PXPCS_TL_CONTROL_5_TRX_ERR_UNEXP_RTAG  (1 << 20)   /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT1   (1 << 19)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT1   (1 << 18)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC1   (1 << 17)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP1   (1 << 16)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW1   (1 << 15)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL1  (1 << 14)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT1    (1 << 13)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT1    (1 << 12)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL1    (1 << 11)   /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP1   (1 << 10)   /*WC*/
+#define PXPCS_TL_CONTROL_5_PRI_SIG_TARGET_ABORT    (1 << 9)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNSPPORT    (1 << 8)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_ECRC    (1 << 7)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MALF_TLP    (1 << 6)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_RX_OFLOW    (1 << 5)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_UNEXP_CPL   (1 << 4)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_MASTER_ABRT     (1 << 3)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_CPL_TIMEOUT     (1 << 2)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_FC_PRTL	   (1 << 1)    /*WC*/
+#define PXPCS_TL_CONTROL_5_ERR_PSND_TLP    (1 << 0)    /*WC*/
+
+
+#define PXPCS_TL_FUNC345_STAT	   0x854
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT4    (1 << 29)   /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4\
+	(1 << 28) /* Unsupported Request Error Status in function 4, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC4\
+	(1 << 27) /* ECRC Error TLP Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP4\
+	(1 << 26) /* Malformed TLP Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW4\
+	(1 << 25) /* Receiver Overflow Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL4\
+	(1 << 24) /* Unexpected Completion Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT4\
+	(1 << 23) /* Receive UR Status in function 4, if set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT4\
+	(1 << 22) /* Completer Timeout Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL4\
+	(1 << 21) /* Flow Control Protocol Error Status in function 4, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP4\
+	(1 << 20) /* Poisoned Error Status in function 4, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT3    (1 << 19)   /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3\
+	(1 << 18) /* Unsupported Request Error Status in function 3, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC3\
+	(1 << 17) /* ECRC Error TLP Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP3\
+	(1 << 16) /* Malformed TLP Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW3\
+	(1 << 15) /* Receiver Overflow Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL3\
+	(1 << 14) /* Unexpected Completion Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT3\
+	(1 << 13) /* Receive UR Status in function 3, if set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT3\
+	(1 << 12) /* Completer Timeout Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL3\
+	(1 << 11) /* Flow Control Protocol Error Status in function 3, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP3\
+	(1 << 10) /* Poisoned Error Status in function 3, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_PRI_SIG_TARGET_ABORT2    (1 << 9)    /* WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2\
+	(1 << 8) /* Unsupported Request Error Status for Function 2, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_ECRC2\
+	(1 << 7) /* ECRC Error TLP Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MALF_TLP2\
+	(1 << 6) /* Malformed TLP Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_RX_OFLOW2\
+	(1 << 5) /* Receiver Overflow Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_UNEXP_CPL2\
+	(1 << 4) /* Unexpected Completion Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_MASTER_ABRT2\
+	(1 << 3) /* Receive UR Status for Function 2, if set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_CPL_TIMEOUT2\
+	(1 << 2) /* Completer Timeout Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_FC_PRTL2\
+	(1 << 1) /* Flow Control Protocol Error Status for Function 2, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC345_STAT_ERR_PSND_TLP2\
+	(1 << 0) /* Poisoned Error Status for Function 2, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+
+
+#define PXPCS_TL_FUNC678_STAT  0x85C
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT7    (1 << 29)   /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7\
+	(1 << 28) /* Unsupported Request Error Status in function 7, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC7\
+	(1 << 27) /* ECRC Error TLP Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP7\
+	(1 << 26) /* Malformed TLP Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW7\
+	(1 << 25) /* Receiver Overflow Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL7\
+	(1 << 24) /* Unexpected Completion Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT7\
+	(1 << 23) /* Receive UR Status in function 7, if set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT7\
+	(1 << 22) /* Completer Timeout Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL7\
+	(1 << 21) /* Flow Control Protocol Error Status in function 7, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP7\
+	(1 << 20) /* Poisoned Error Status in function 7, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT6    (1 << 19)    /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6\
+	(1 << 18) /* Unsupported Request Error Status in function 6, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC6\
+	(1 << 17) /* ECRC Error TLP Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP6\
+	(1 << 16) /* Malformed TLP Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW6\
+	(1 << 15) /* Receiver Overflow Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL6\
+	(1 << 14) /* Unexpected Completion Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT6\
+	(1 << 13) /* Receive UR Status in function 6, if set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT6\
+	(1 << 12) /* Completer Timeout Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL6\
+	(1 << 11) /* Flow Control Protocol Error Status in function 6, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP6\
+	(1 << 10) /* Poisoned Error Status in function 6, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_PRI_SIG_TARGET_ABORT5    (1 << 9) /* WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5\
+	(1 << 8) /* Unsupported Request Error Status for Function 5, if \
+	set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_ECRC5\
+	(1 << 7) /* ECRC Error TLP Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MALF_TLP5\
+	(1 << 6) /* Malformed TLP Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_RX_OFLOW5\
+	(1 << 5) /* Receiver Overflow Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_UNEXP_CPL5\
+	(1 << 4) /* Unexpected Completion Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_MASTER_ABRT5\
+	(1 << 3) /* Receive UR Status for Function 5, if set, generate \
+	pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_CPL_TIMEOUT5\
+	(1 << 2) /* Completer Timeout Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_FC_PRTL5\
+	(1 << 1) /* Flow Control Protocol Error Status for Function 5, \
+	if set, generate pcie_err_attn output when this error is seen. WC */
+#define PXPCS_TL_FUNC678_STAT_ERR_PSND_TLP5\
+	(1 << 0) /* Poisoned Error Status for Function 5, if set, \
+	generate pcie_err_attn output when this error is seen. WC */
+
+
+#define BAR_USTRORM_INTMEM				0x400000
+#define BAR_CSTRORM_INTMEM				0x410000
+#define BAR_XSTRORM_INTMEM				0x420000
+#define BAR_TSTRORM_INTMEM				0x430000
+
+/* for accessing the IGU in case of status block ACK */
+#define BAR_IGU_INTMEM					0x440000
+
+#define BAR_DOORBELL_OFFSET				0x800000
+
+#define BAR_ME_REGISTER				0x450000
+#define ME_REG_PF_NUM_SHIFT		0
+#define ME_REG_PF_NUM\
+	(7L<<ME_REG_PF_NUM_SHIFT) /* Relative PF Num */
+#define ME_REG_VF_VALID		(1<<8)
+#define ME_REG_VF_NUM_SHIFT		9
+#define ME_REG_VF_NUM_MASK		(0x3f<<ME_REG_VF_NUM_SHIFT)
+#define ME_REG_VF_ERR			(0x1<<3)
+#define ME_REG_ABS_PF_NUM_SHIFT	16
+#define ME_REG_ABS_PF_NUM\
+	(7L<<ME_REG_ABS_PF_NUM_SHIFT) /* Absolute PF Num */
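+
+/*
+ * Illustration only (not part of the original header): decoding the ME
+ * register with the masks above, assuming 'me' holds the raw value:
+ *
+ *	u8   pf_num = (me & ME_REG_PF_NUM) >> ME_REG_PF_NUM_SHIFT;
+ *	bool is_vf  = !!(me & ME_REG_VF_VALID);
+ *	u8   vf_num = (me & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
+ */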
+
+
+#define MDIO_REG_BANK_CL73_IEEEB0	0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL	0x0
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN	0x0200
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN		0x1000
+#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST	0x8000
+
+#define MDIO_REG_BANK_CL73_IEEEB1	0x10
+#define MDIO_CL73_IEEEB1_AN_ADV1		0x00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE			0x0400
+#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK		0x0C00
+#define MDIO_CL73_IEEEB1_AN_ADV2		0x01
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M		0x0000
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX		0x0020
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4		0x0040
+#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR		0x0080
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1		0x03
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE		0x0400
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC		0x0800
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH		0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK		0x0C00
+
+#define MDIO_REG_BANK_RX0				0x80b0
+#define MDIO_RX0_RX_STATUS				0x10
+#define MDIO_RX0_RX_STATUS_SIGDET			0x8000
+#define MDIO_RX0_RX_STATUS_RX_SEQ_DONE			0x1000
+#define MDIO_RX0_RX_EQ_BOOST				0x1c
+#define MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX1				0x80c0
+#define MDIO_RX1_RX_EQ_BOOST				0x1c
+#define MDIO_RX1_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX1_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX2				0x80d0
+#define MDIO_RX2_RX_EQ_BOOST				0x1c
+#define MDIO_RX2_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX2_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX3				0x80e0
+#define MDIO_RX3_RX_EQ_BOOST				0x1c
+#define MDIO_RX3_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX3_RX_EQ_BOOST_OFFSET_CTRL		0x10
+
+#define MDIO_REG_BANK_RX_ALL				0x80f0
+#define MDIO_RX_ALL_RX_EQ_BOOST 			0x1c
+#define MDIO_RX_ALL_RX_EQ_BOOST_EQUALIZER_CTRL_MASK	0x7
+#define MDIO_RX_ALL_RX_EQ_BOOST_OFFSET_CTRL	0x10
+
+#define MDIO_REG_BANK_TX0				0x8060
+#define MDIO_TX0_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_TX1				0x8070
+#define MDIO_TX1_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_TX2				0x8080
+#define MDIO_TX2_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_TX3				0x8090
+#define MDIO_TX3_TX_DRIVER				0x17
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK		0xf000
+#define MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT		12
+#define MDIO_TX0_TX_DRIVER_IDRIVER_MASK 		0x0f00
+#define MDIO_TX0_TX_DRIVER_IDRIVER_SHIFT		8
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_MASK		0x00f0
+#define MDIO_TX0_TX_DRIVER_IPREDRIVER_SHIFT		4
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_MASK		0x000e
+#define MDIO_TX0_TX_DRIVER_IFULLSPD_SHIFT		1
+#define MDIO_TX0_TX_DRIVER_ICBUF1T			1
+
+#define MDIO_REG_BANK_XGXS_BLOCK0			0x8000
+#define MDIO_BLOCK0_XGXS_CONTROL			0x10
+
+#define MDIO_REG_BANK_XGXS_BLOCK1			0x8010
+#define MDIO_BLOCK1_LANE_CTRL0				0x15
+#define MDIO_BLOCK1_LANE_CTRL1				0x16
+#define MDIO_BLOCK1_LANE_CTRL2				0x17
+#define MDIO_BLOCK1_LANE_PRBS				0x19
+
+#define MDIO_REG_BANK_XGXS_BLOCK2			0x8100
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP			0x10
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE		0x8000
+#define MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE	0x4000
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP		0x11
+#define MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE		0x8000
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G	0x14
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS	0x0001
+#define MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS	0x0010
+#define MDIO_XGXS_BLOCK2_TEST_MODE_LANE 	0x15
+
+#define MDIO_REG_BANK_GP_STATUS 			0x8120
+#define MDIO_GP_STATUS_TOP_AN_STATUS1				0x1B
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE	0x0001
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL37_AUTONEG_COMPLETE	0x0002
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS		0x0004
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS		0x0008
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE	0x0010
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_LP_NP_BAM_ABLE	0x0020
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_TXSIDE	0x0040
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_PAUSE_RSOLUTION_RXSIDE	0x0080
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_MASK 	0x3f00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10M		0x0000
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_100M 	0x0100
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G		0x0200
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_2_5G 	0x0300
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_5G		0x0400
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_6G		0x0500
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_HIG	0x0600
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_CX4	0x0700
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12G_HIG	0x0800
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_12_5G	0x0900
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_13G		0x0A00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_15G		0x0B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_16G		0x0C00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_1G_KX	0x0D00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KX4	0x0E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_KR	0x0F00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_XFI	0x1B00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_20G_DXGXS	0x1E00
+#define MDIO_GP_STATUS_TOP_AN_STATUS1_ACTUAL_SPEED_10G_SFI	0x1F00
+
+
+#define MDIO_REG_BANK_10G_PARALLEL_DETECT		0x8130
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS		0x10
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK		0x8000
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL		0x11
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN	0x1
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK		0x13
+#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT		(0xb71<<1)
+
+#define MDIO_REG_BANK_SERDES_DIGITAL			0x8300
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1			0x10
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE 		0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_TBI_IF			0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN		0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT	0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET			0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE			0x0020
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2			0x11
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN			0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_AN_FST_TMR 		0x0040
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1			0x14
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SGMII			0x0001
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_LINK			0x0002
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_DUPLEX			0x0004
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_MASK			0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_SHIFT 		3
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_2_5G			0x0018
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G			0x0010
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M			0x0008
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M			0x0000
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2			0x15
+#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 		0x0002
+#define MDIO_SERDES_DIGITAL_MISC1				0x18
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK			0xE000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M			0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_100M			0x2000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_125M			0x4000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M			0x6000
+#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_187_5M			0x8000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL			0x0010
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK			0x000f
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_2_5G			0x0000
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_5G			0x0001
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_6G			0x0002
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_HIG			0x0003
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4			0x0004
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12G			0x0005
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_12_5G			0x0006
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_13G			0x0007
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_15G			0x0008
+#define MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_16G			0x0009
+
+#define MDIO_REG_BANK_OVER_1G				0x8320
+#define MDIO_OVER_1G_DIGCTL_3_4 				0x14
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_MASK				0xffe0
+#define MDIO_OVER_1G_DIGCTL_3_4_MP_ID_SHIFT				5
+#define MDIO_OVER_1G_UP1					0x19
+#define MDIO_OVER_1G_UP1_2_5G						0x0001
+#define MDIO_OVER_1G_UP1_5G						0x0002
+#define MDIO_OVER_1G_UP1_6G						0x0004
+#define MDIO_OVER_1G_UP1_10G						0x0010
+#define MDIO_OVER_1G_UP1_10GH						0x0008
+#define MDIO_OVER_1G_UP1_12G						0x0020
+#define MDIO_OVER_1G_UP1_12_5G						0x0040
+#define MDIO_OVER_1G_UP1_13G						0x0080
+#define MDIO_OVER_1G_UP1_15G						0x0100
+#define MDIO_OVER_1G_UP1_16G						0x0200
+#define MDIO_OVER_1G_UP2					0x1A
+#define MDIO_OVER_1G_UP2_IPREDRIVER_MASK				0x0007
+#define MDIO_OVER_1G_UP2_IDRIVER_MASK					0x0038
+#define MDIO_OVER_1G_UP2_PREEMPHASIS_MASK				0x03C0
+#define MDIO_OVER_1G_UP3					0x1B
+#define MDIO_OVER_1G_UP3_HIGIG2 					0x0001
+#define MDIO_OVER_1G_LP_UP1					0x1C
+#define MDIO_OVER_1G_LP_UP2					0x1D
+#define MDIO_OVER_1G_LP_UP2_MR_ADV_OVER_1G_MASK 			0x03ff
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK				0x0780
+#define MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT				7
+#define MDIO_OVER_1G_LP_UP3						0x1E
+
+#define MDIO_REG_BANK_REMOTE_PHY			0x8330
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS				0x10
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_OVER1G_MSG	0x0010
+#define MDIO_REMOTE_PHY_MISC_RX_STATUS_CL37_FSM_RECEIVED_BRCM_OUI_MSG	0x0600
+
+#define MDIO_REG_BANK_BAM_NEXT_PAGE			0x8350
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL			0x10
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE			0x0001
+#define MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN			0x0002
+
+#define MDIO_REG_BANK_CL73_USERB0		0x8370
+#define MDIO_CL73_USERB0_CL73_UCTRL				0x10
+#define MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL			0x0002
+#define MDIO_CL73_USERB0_CL73_USTAT1				0x11
+#define MDIO_CL73_USERB0_CL73_USTAT1_LINK_STATUS_CHECK			0x0100
+#define MDIO_CL73_USERB0_CL73_USTAT1_AN_GOOD_CHECK_BAM37		0x0400
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1 			0x12
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN				0x8000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN		0x4000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN		0x2000
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3 			0x14
+#define MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR 		0x0001
+
+#define MDIO_REG_BANK_AER_BLOCK 		0xFFD0
+#define MDIO_AER_BLOCK_AER_REG					0x1E
+
+#define MDIO_REG_BANK_COMBO_IEEE0		0xFFE0
+#define MDIO_COMBO_IEEE0_MII_CONTROL				0x10
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK			0x2040
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_10			0x0000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100			0x2000
+#define MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000			0x0040
+#define MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX 			0x0100
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN				0x0200
+#define MDIO_COMBO_IEEO_MII_CONTROL_AN_EN				0x1000
+#define MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK				0x4000
+#define MDIO_COMBO_IEEO_MII_CONTROL_RESET				0x8000
+#define MDIO_COMBO_IEEE0_MII_STATUS				0x11
+#define MDIO_COMBO_IEEE0_MII_STATUS_LINK_PASS				0x0004
+#define MDIO_COMBO_IEEE0_MII_STATUS_AUTONEG_COMPLETE			0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV				0x14
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX			0x0020
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_HALF_DUPLEX			0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK			0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE			0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_SYMMETRIC			0x0080
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC			0x0100
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH			0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_ADV_NEXT_PAGE 			0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1 	0x15
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_NEXT_PAGE	0x8000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_ACK		0x4000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_MASK	0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_NONE	0x0000
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_PAUSE_BOTH	0x0180
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_HALF_DUP_CAP	0x0040
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_FULL_DUP_CAP	0x0020
+/* When the link partner is in SGMII mode (bit 0 = 1), then
+ * bit 15 = link, bit 12 = duplex, bits 11:10 = speed, bit 14 = acknowledge.
+ * The other bits are reserved and should be zero.
+ */
+#define MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1_SGMII_MODE	0x0001
+
+
+#define MDIO_PMA_DEVAD			0x1
+/* ieee */
+#define MDIO_PMA_REG_CTRL		0x0
+#define MDIO_PMA_REG_STATUS		0x1
+#define MDIO_PMA_REG_10G_CTRL2		0x7
+#define MDIO_PMA_REG_TX_DISABLE		0x0009
+#define MDIO_PMA_REG_RX_SD		0xa
+/* bcm */
+#define MDIO_PMA_REG_BCM_CTRL		0x0096
+#define MDIO_PMA_REG_FEC_CTRL		0x00ab
+#define MDIO_PMA_REG_PHY_IDENTIFIER	0xc800
+#define MDIO_PMA_REG_DIGITAL_CTRL	0xc808
+#define MDIO_PMA_REG_DIGITAL_STATUS	0xc809
+#define MDIO_PMA_REG_TX_POWER_DOWN	0xca02
+#define MDIO_PMA_REG_CMU_PLL_BYPASS	0xca09
+#define MDIO_PMA_REG_MISC_CTRL		0xca0a
+#define MDIO_PMA_REG_GEN_CTRL		0xca10
+#define MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP	0x0188
+#define MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET		0x018a
+#define MDIO_PMA_REG_M8051_MSGIN_REG	0xca12
+#define MDIO_PMA_REG_M8051_MSGOUT_REG	0xca13
+#define MDIO_PMA_REG_ROM_VER1		0xca19
+#define MDIO_PMA_REG_ROM_VER2		0xca1a
+#define MDIO_PMA_REG_EDC_FFE_MAIN	0xca1b
+#define MDIO_PMA_REG_PLL_BANDWIDTH	0xca1d
+#define MDIO_PMA_REG_PLL_CTRL		0xca1e
+#define MDIO_PMA_REG_MISC_CTRL0 	0xca23
+#define MDIO_PMA_REG_LRM_MODE		0xca3f
+#define MDIO_PMA_REG_CDR_BANDWIDTH	0xca46
+#define MDIO_PMA_REG_MISC_CTRL1 	0xca85
+
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL		0x8000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK	0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE		0x0000
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_COMPLETE	0x0004
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IN_PROGRESS	0x0008
+#define MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_FAILED 	0x000c
+#define MDIO_PMA_REG_SFP_TWO_WIRE_BYTE_CNT	0x8002
+#define MDIO_PMA_REG_SFP_TWO_WIRE_MEM_ADDR	0x8003
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_BUF	0xc820
+#define MDIO_PMA_REG_8726_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8726_TX_CTRL1		0xca01
+#define MDIO_PMA_REG_8726_TX_CTRL2		0xca05
+
+#define MDIO_PMA_REG_8727_TWO_WIRE_SLAVE_ADDR	0x8005
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_BUF	0x8007
+#define MDIO_PMA_REG_8727_TWO_WIRE_DATA_MASK 0xff
+#define MDIO_PMA_REG_8727_TX_CTRL1		0xca02
+#define MDIO_PMA_REG_8727_TX_CTRL2		0xca05
+#define MDIO_PMA_REG_8727_PCS_OPT_CTRL		0xc808
+#define MDIO_PMA_REG_8727_GPIO_CTRL		0xc80e
+#define MDIO_PMA_REG_8727_PCS_GP		0xc842
+#define MDIO_PMA_REG_8727_OPT_CFG_REG		0xc8e4
+
+#define MDIO_AN_REG_8727_MISC_CTRL		0x8309
+
+#define MDIO_PMA_REG_8073_CHIP_REV			0xc801
+#define MDIO_PMA_REG_8073_SPEED_LINK_STATUS		0xc820
+#define MDIO_PMA_REG_8073_XAUI_WA			0xc841
+#define MDIO_PMA_REG_8073_OPT_DIGITAL_CTRL		0xcd08
+
+#define MDIO_PMA_REG_7101_RESET 	0xc000
+#define MDIO_PMA_REG_7107_LED_CNTL	0xc007
+#define MDIO_PMA_REG_7107_LINK_LED_CNTL 0xc009
+#define MDIO_PMA_REG_7101_VER1		0xc026
+#define MDIO_PMA_REG_7101_VER2		0xc027
+
+#define MDIO_PMA_REG_8481_PMD_SIGNAL			0xa811
+#define MDIO_PMA_REG_8481_LED1_MASK			0xa82c
+#define MDIO_PMA_REG_8481_LED2_MASK			0xa82f
+#define MDIO_PMA_REG_8481_LED3_MASK			0xa832
+#define MDIO_PMA_REG_8481_LED3_BLINK			0xa834
+#define MDIO_PMA_REG_8481_LED5_MASK			0xa838
+#define MDIO_PMA_REG_8481_SIGNAL_MASK			0xa835
+#define MDIO_PMA_REG_8481_LINK_SIGNAL			0xa83b
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_MASK	0x800
+#define MDIO_PMA_REG_8481_LINK_SIGNAL_LED4_ENABLE_SHIFT 11
+
+
+#define MDIO_WIS_DEVAD			0x2
+/* bcm */
+#define MDIO_WIS_REG_LASI_CNTL		0x9002
+#define MDIO_WIS_REG_LASI_STATUS	0x9005
+
+#define MDIO_PCS_DEVAD			0x3
+#define MDIO_PCS_REG_STATUS		0x0020
+#define MDIO_PCS_REG_LASI_STATUS	0x9005
+#define MDIO_PCS_REG_7101_DSP_ACCESS	0xD000
+#define MDIO_PCS_REG_7101_SPI_MUX	0xD008
+#define MDIO_PCS_REG_7101_SPI_CTRL_ADDR 0xE12A
+#define MDIO_PCS_REG_7101_SPI_RESET_BIT (5)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR 0xE02A
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_WRITE_ENABLE_CMD (6)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_BULK_ERASE_CMD	 (0xC7)
+#define MDIO_PCS_REG_7101_SPI_FIFO_ADDR_PAGE_PROGRAM_CMD (2)
+#define MDIO_PCS_REG_7101_SPI_BYTES_TO_TRANSFER_ADDR 0xE028
+
+
+#define MDIO_XS_DEVAD			0x4
+#define MDIO_XS_PLL_SEQUENCER		0x8000
+#define MDIO_XS_SFX7101_XGXS_TEST1	0xc00a
+
+#define MDIO_XS_8706_REG_BANK_RX0	0x80bc
+#define MDIO_XS_8706_REG_BANK_RX1	0x80cc
+#define MDIO_XS_8706_REG_BANK_RX2	0x80dc
+#define MDIO_XS_8706_REG_BANK_RX3	0x80ec
+#define MDIO_XS_8706_REG_BANK_RXA	0x80fc
+
+#define MDIO_XS_REG_8073_RX_CTRL_PCIE	0x80FA
+
+#define MDIO_AN_DEVAD			0x7
+/* ieee */
+#define MDIO_AN_REG_CTRL		0x0000
+#define MDIO_AN_REG_STATUS		0x0001
+#define MDIO_AN_REG_STATUS_AN_COMPLETE		0x0020
+#define MDIO_AN_REG_ADV_PAUSE		0x0010
+#define MDIO_AN_REG_ADV_PAUSE_PAUSE		0x0400
+#define MDIO_AN_REG_ADV_PAUSE_ASYMMETRIC	0x0800
+#define MDIO_AN_REG_ADV_PAUSE_BOTH		0x0C00
+#define MDIO_AN_REG_ADV_PAUSE_MASK		0x0C00
+#define MDIO_AN_REG_ADV 		0x0011
+#define MDIO_AN_REG_ADV2		0x0012
+#define MDIO_AN_REG_LP_AUTO_NEG 	0x0013
+#define MDIO_AN_REG_MASTER_STATUS	0x0021
+/* bcm */
+#define MDIO_AN_REG_LINK_STATUS 	0x8304
+#define MDIO_AN_REG_CL37_CL73		0x8370
+#define MDIO_AN_REG_CL37_AN		0xffe0
+#define MDIO_AN_REG_CL37_FC_LD		0xffe4
+#define MDIO_AN_REG_CL37_FC_LP		0xffe5
+
+#define MDIO_AN_REG_8073_2_5G		0x8329
+#define MDIO_AN_REG_8073_BAM		0x8350
+
+#define MDIO_AN_REG_8481_10GBASE_T_AN_CTRL	0x0020
+#define MDIO_AN_REG_8481_LEGACY_MII_CTRL	0xffe0
+#define MDIO_AN_REG_8481_LEGACY_MII_STATUS	0xffe1
+#define MDIO_AN_REG_8481_LEGACY_AN_ADV		0xffe4
+#define MDIO_AN_REG_8481_LEGACY_AN_EXPANSION	0xffe6
+#define MDIO_AN_REG_8481_1000T_CTRL		0xffe9
+#define MDIO_AN_REG_8481_EXPANSION_REG_RD_RW	0xfff5
+#define MDIO_AN_REG_8481_EXPANSION_REG_ACCESS	0xfff7
+#define MDIO_AN_REG_8481_AUX_CTRL		0xfff8
+#define MDIO_AN_REG_8481_LEGACY_SHADOW		0xfffc
+
+/* BCM84823 only */
+#define MDIO_CTL_DEVAD			0x1e
+#define MDIO_CTL_REG_84823_MEDIA		0x401a
+#define MDIO_CTL_REG_84823_MEDIA_MAC_MASK		0x0018
+	/* These pins configure the BCM84823 interface to MAC after reset. */
+#define MDIO_CTL_REG_84823_CTRL_MAC_XFI			0x0008
+#define MDIO_CTL_REG_84823_MEDIA_MAC_XAUI_M		0x0010
+	/* These pins configure the BCM84823 interface to Line after reset. */
+#define MDIO_CTL_REG_84823_MEDIA_LINE_MASK		0x0060
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XAUI_L		0x0020
+#define MDIO_CTL_REG_84823_MEDIA_LINE_XFI		0x0040
+	/* When this pin is active high during reset, the 10GBASE-T core is
+	 * powered down; when it is active low, the 10GBASE-T core is
+	 * powered up.
+	 */
+#define MDIO_CTL_REG_84823_MEDIA_COPPER_CORE_DOWN	0x0080
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_MASK		0x0100
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER	0x0000
+#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER		0x0100
+#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G			0x1000
+#define MDIO_CTL_REG_84823_USER_CTRL_REG		0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS		0x0080
+
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1		0xa8e3
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN		0x0080
+
+/* BCM84833 only */
+#define MDIO_84833_TOP_CFG_XGPHY_STRAP1			0x401a
+#define MDIO_84833_SUPER_ISOLATE		0x8000
+/* Mailbox registers used by the 84833. */
+#define MDIO_84833_TOP_CFG_SCRATCH_REG0			0x4005
+#define MDIO_84833_TOP_CFG_SCRATCH_REG1			0x4006
+#define MDIO_84833_TOP_CFG_SCRATCH_REG2			0x4007
+#define MDIO_84833_TOP_CFG_SCRATCH_REG3			0x4008
+#define MDIO_84833_TOP_CFG_SCRATCH_REG4			0x4009
+#define MDIO_84833_TOP_CFG_DATA3_REG			0x4011
+#define MDIO_84833_TOP_CFG_DATA4_REG			0x4012
+
+/* Mailbox command set used by 84833. */
+#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE		0x2
+/* Mailbox status set used by 84833. */
+#define PHY84833_CMD_RECEIVED				0x0001
+#define PHY84833_CMD_IN_PROGRESS			0x0002
+#define PHY84833_CMD_COMPLETE_PASS			0x0004
+#define PHY84833_CMD_COMPLETE_ERROR			0x0008
+#define PHY84833_CMD_OPEN_FOR_CMDS			0x0010
+#define PHY84833_CMD_SYSTEM_BOOT			0x0020
+#define PHY84833_CMD_NOT_OPEN_FOR_CMDS			0x0040
+#define PHY84833_CMD_CLEAR_COMPLETE			0x0080
+#define PHY84833_CMD_OPEN_OVERRIDE			0xa5a5
+
+
+/* 84833 F/W Feature Commands */
+#define PHY84833_DIAG_CMD_GET_EEE_MODE			0x27
+#define PHY84833_DIAG_CMD_SET_EEE_MODE			0x28
+
+/* Warpcore clause 45 addressing */
+#define MDIO_WC_DEVAD					0x3
+#define MDIO_WC_REG_IEEE0BLK_MIICNTL			0x0
+#define MDIO_WC_REG_IEEE0BLK_AUTONEGNP			0x7
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT0	0x10
+#define MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1	0x11
+#define MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150  0x96
+#define MDIO_WC_REG_XGXSBLK0_XGXSCONTROL		0x8000
+#define MDIO_WC_REG_XGXSBLK0_MISCCONTROL1		0x800e
+#define MDIO_WC_REG_XGXSBLK1_DESKEW			0x8010
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL0			0x8015
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL1			0x8016
+#define MDIO_WC_REG_XGXSBLK1_LANECTRL2			0x8017
+#define MDIO_WC_REG_TX0_ANA_CTRL0			0x8061
+#define MDIO_WC_REG_TX1_ANA_CTRL0			0x8071
+#define MDIO_WC_REG_TX2_ANA_CTRL0			0x8081
+#define MDIO_WC_REG_TX3_ANA_CTRL0			0x8091
+#define MDIO_WC_REG_TX0_TX_DRIVER			0x8067
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_OFFSET		0x04
+#define MDIO_WC_REG_TX0_TX_DRIVER_IPRE_DRIVER_MASK			0x00f0
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_OFFSET		0x08
+#define MDIO_WC_REG_TX0_TX_DRIVER_IDRIVER_MASK				0x0f00
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_OFFSET		0x0c
+#define MDIO_WC_REG_TX0_TX_DRIVER_POST2_COEFF_MASK			0x7000
+#define MDIO_WC_REG_TX1_TX_DRIVER			0x8077
+#define MDIO_WC_REG_TX2_TX_DRIVER			0x8087
+#define MDIO_WC_REG_TX3_TX_DRIVER			0x8097
+#define MDIO_WC_REG_RX0_ANARXCONTROL1G			0x80b9
+#define MDIO_WC_REG_RX2_ANARXCONTROL1G			0x80d9
+#define MDIO_WC_REG_RX0_PCI_CTRL			0x80ba
+#define MDIO_WC_REG_RX1_PCI_CTRL			0x80ca
+#define MDIO_WC_REG_RX2_PCI_CTRL			0x80da
+#define MDIO_WC_REG_RX3_PCI_CTRL			0x80ea
+#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G		0x8104
+#define MDIO_WC_REG_XGXS_STATUS3			0x8129
+#define MDIO_WC_REG_PAR_DET_10G_STATUS			0x8130
+#define MDIO_WC_REG_PAR_DET_10G_CTRL			0x8131
+#define MDIO_WC_REG_XGXS_X2_CONTROL2			0x8141
+#define MDIO_WC_REG_XGXS_RX_LN_SWAP1			0x816B
+#define MDIO_WC_REG_XGXS_TX_LN_SWAP1			0x8169
+#define MDIO_WC_REG_GP2_STATUS_GP_2_0			0x81d0
+#define MDIO_WC_REG_GP2_STATUS_GP_2_1			0x81d1
+#define MDIO_WC_REG_GP2_STATUS_GP_2_2			0x81d2
+#define MDIO_WC_REG_GP2_STATUS_GP_2_3			0x81d3
+#define MDIO_WC_REG_GP2_STATUS_GP_2_4			0x81d4
+#define MDIO_WC_REG_UC_INFO_B0_DEAD_TRAP		0x81EE
+#define MDIO_WC_REG_UC_INFO_B1_VERSION			0x81F0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE		0x81F2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE0_OFFSET	0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_DEFAULT	    0x0
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_OPT_LR	    0x1
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_DAC	    0x2
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_SFP_XLAUI	    0x3
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_MODE_LONG_CH_6G	    0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE1_OFFSET	0x4
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE2_OFFSET	0x8
+#define MDIO_WC_REG_UC_INFO_B1_FIRMWARE_LANE3_OFFSET	0xc
+#define MDIO_WC_REG_UC_INFO_B1_CRC			0x81FE
+#define MDIO_WC_REG_DSC_SMC				0x8213
+#define MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0		0x821e
+#define MDIO_WC_REG_TX_FIR_TAP				0x82e2
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_OFFSET		0x00
+#define MDIO_WC_REG_TX_FIR_TAP_PRE_TAP_MASK			0x000f
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_OFFSET		0x04
+#define MDIO_WC_REG_TX_FIR_TAP_MAIN_TAP_MASK		0x03f0
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_OFFSET		0x0a
+#define MDIO_WC_REG_TX_FIR_TAP_POST_TAP_MASK		0x7c00
+#define MDIO_WC_REG_TX_FIR_TAP_ENABLE		0x8000
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL	0x82e3
+#define MDIO_WC_REG_CL72_USERB0_CL72_OS_DEF_CTRL	0x82e6
+#define MDIO_WC_REG_CL72_USERB0_CL72_BR_DEF_CTRL	0x82e7
+#define MDIO_WC_REG_CL72_USERB0_CL72_2P5_DEF_CTRL	0x82e8
+#define MDIO_WC_REG_CL72_USERB0_CL72_MISC4_CONTROL	0x82ec
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1		0x8300
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2		0x8301
+#define MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3		0x8302
+#define MDIO_WC_REG_SERDESDIGITAL_STATUS1000X1		0x8304
+#define MDIO_WC_REG_SERDESDIGITAL_MISC1			0x8308
+#define MDIO_WC_REG_SERDESDIGITAL_MISC2			0x8309
+#define MDIO_WC_REG_DIGITAL3_UP1			0x8329
+#define MDIO_WC_REG_DIGITAL4_MISC3			0x833c
+#define MDIO_WC_REG_DIGITAL5_MISC6			0x8345
+#define MDIO_WC_REG_DIGITAL5_MISC7			0x8349
+#define MDIO_WC_REG_DIGITAL5_ACTUAL_SPEED		0x834e
+#define MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL		0x8350
+#define MDIO_WC_REG_CL49_USERB0_CTRL			0x8368
+#define MDIO_WC_REG_TX66_CONTROL			0x83b0
+#define MDIO_WC_REG_RX66_CONTROL			0x83c0
+#define MDIO_WC_REG_RX66_SCW0				0x83c2
+#define MDIO_WC_REG_RX66_SCW1				0x83c3
+#define MDIO_WC_REG_RX66_SCW2				0x83c4
+#define MDIO_WC_REG_RX66_SCW3				0x83c5
+#define MDIO_WC_REG_RX66_SCW0_MASK			0x83c6
+#define MDIO_WC_REG_RX66_SCW1_MASK			0x83c7
+#define MDIO_WC_REG_RX66_SCW2_MASK			0x83c8
+#define MDIO_WC_REG_RX66_SCW3_MASK			0x83c9
+#define MDIO_WC_REG_FX100_CTRL1				0x8400
+#define MDIO_WC_REG_FX100_CTRL3				0x8402
+
+#define MDIO_WC_REG_MICROBLK_CMD			0xffc2
+#define MDIO_WC_REG_MICROBLK_DL_STATUS			0xffc5
+#define MDIO_WC_REG_MICROBLK_CMD3			0xffcc
+
+#define MDIO_WC_REG_AERBLK_AER				0xffde
+#define MDIO_WC_REG_COMBO_IEEE0_MIICTRL			0xffe0
+#define MDIO_WC_REG_COMBO_IEEE0_MIIISTAT		0xffe1
+
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET			0x810A
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_RX_BITSHIFT	0
+#define MDIO_WC0_XGXS_BLK2_LANE_RESET_TX_BITSHIFT	4
+
+#define MDIO_WC0_XGXS_BLK6_XGXS_X2_CONTROL2		0x8141
+
+#define DIGITAL5_ACTUAL_SPEED_TX_MASK			0x003f
+
+/* 54618se */
+#define MDIO_REG_GPHY_PHYID_LSB				0x3
+#define MDIO_REG_GPHY_ID_54618SE		0x5cd5
+#define MDIO_REG_GPHY_CL45_ADDR_REG			0xd
+#define MDIO_REG_GPHY_CL45_DATA_REG			0xe
+#define MDIO_REG_GPHY_EEE_ADV			0x3c
+#define MDIO_REG_GPHY_EEE_1G		(0x1 << 2)
+#define MDIO_REG_GPHY_EEE_100		(0x1 << 1)
+#define MDIO_REG_GPHY_EEE_RESOLVED		0x803e
+#define MDIO_REG_INTR_STATUS				0x1a
+#define MDIO_REG_INTR_MASK				0x1b
+#define MDIO_REG_INTR_MASK_LINK_STATUS			(0x1 << 1)
+#define MDIO_REG_GPHY_SHADOW				0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL2			(0x0e << 10)
+#define MDIO_REG_GPHY_SHADOW_WR_ENA			(0x1 << 15)
+#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED		(0x1e << 10)
+#define MDIO_REG_GPHY_SHADOW_INVERT_FIB_SD		(0x1 << 8)
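+
+/*
+ * Illustrative sketch, assuming the usual Broadcom shadow-register
+ * convention (bits 14:10 select the shadow, bit 15 commits a write),
+ * which is not spelled out in this header:
+ *
+ *	u16 val = MDIO_REG_GPHY_SHADOW_WR_ENA |
+ *		  MDIO_REG_GPHY_SHADOW_AUTO_DET_MED;
+ *
+ * and then write 'val' to the MDIO_REG_GPHY_SHADOW register over MDIO.
+ */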
+
+#define IGU_FUNC_BASE			0x0400
+
+#define IGU_ADDR_MSIX			0x0000
+#define IGU_ADDR_INT_ACK		0x0200
+#define IGU_ADDR_PROD_UPD		0x0201
+#define IGU_ADDR_ATTN_BITS_UPD	0x0202
+#define IGU_ADDR_ATTN_BITS_SET	0x0203
+#define IGU_ADDR_ATTN_BITS_CLR	0x0204
+#define IGU_ADDR_COALESCE_NOW	0x0205
+#define IGU_ADDR_SIMD_MASK		0x0206
+#define IGU_ADDR_SIMD_NOMASK	0x0207
+#define IGU_ADDR_MSI_CTL		0x0210
+#define IGU_ADDR_MSI_ADDR_LO	0x0211
+#define IGU_ADDR_MSI_ADDR_HI	0x0212
+#define IGU_ADDR_MSI_DATA		0x0213
+
+#define IGU_USE_REGISTER_ustorm_type_0_sb_cleanup  0
+#define IGU_USE_REGISTER_ustorm_type_1_sb_cleanup  1
+#define IGU_USE_REGISTER_cstorm_type_0_sb_cleanup  2
+#define IGU_USE_REGISTER_cstorm_type_1_sb_cleanup  3
+
+#define COMMAND_REG_INT_ACK	    0x0
+#define COMMAND_REG_PROD_UPD	    0x4
+#define COMMAND_REG_ATTN_BITS_UPD   0x8
+#define COMMAND_REG_ATTN_BITS_SET   0xc
+#define COMMAND_REG_ATTN_BITS_CLR   0x10
+#define COMMAND_REG_COALESCE_NOW    0x14
+#define COMMAND_REG_SIMD_MASK	    0x18
+#define COMMAND_REG_SIMD_NOMASK     0x1c
+
+
+#define IGU_MEM_BASE						0x0000
+
+#define IGU_MEM_MSIX_BASE					0x0000
+#define IGU_MEM_MSIX_UPPER					0x007f
+#define IGU_MEM_MSIX_RESERVED_UPPER			0x01ff
+
+#define IGU_MEM_PBA_MSIX_BASE				0x0200
+#define IGU_MEM_PBA_MSIX_UPPER				0x0200
+
+#define IGU_CMD_BACKWARD_COMP_PROD_UPD		0x0201
+#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 	0x03ff
+
+#define IGU_CMD_INT_ACK_BASE				0x0400
+#define IGU_CMD_INT_ACK_UPPER\
+	(IGU_CMD_INT_ACK_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_INT_ACK_RESERVED_UPPER		0x04ff
+
+#define IGU_CMD_E2_PROD_UPD_BASE			0x0500
+#define IGU_CMD_E2_PROD_UPD_UPPER\
+	(IGU_CMD_E2_PROD_UPD_BASE + MAX_SB_PER_PORT * NUM_OF_PORTS_PER_PATH - 1)
+#define IGU_CMD_E2_PROD_UPD_RESERVED_UPPER	0x059f
+
+#define IGU_CMD_ATTN_BIT_UPD_UPPER			0x05a0
+#define IGU_CMD_ATTN_BIT_SET_UPPER			0x05a1
+#define IGU_CMD_ATTN_BIT_CLR_UPPER			0x05a2
+
+#define IGU_REG_SISR_MDPC_WMASK_UPPER		0x05a3
+#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER	0x05a4
+#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER	0x05a5
+#define IGU_REG_SISR_MDPC_WOMASK_UPPER		0x05a6
+
+#define IGU_REG_RESERVED_UPPER				0x05ff
+/* Fields of IGU PF CONFIGURATION REGISTER */
+#define IGU_PF_CONF_FUNC_EN	  (0x1<<0)  /* function enable	      */
+#define IGU_PF_CONF_MSI_MSIX_EN   (0x1<<1)  /* MSI/MSIX enable	      */
+#define IGU_PF_CONF_INT_LINE_EN   (0x1<<2)  /* INT enable	      */
+#define IGU_PF_CONF_ATTN_BIT_EN   (0x1<<3)  /* attention enable       */
+#define IGU_PF_CONF_SINGLE_ISR_EN (0x1<<4)  /* single ISR mode enable */
+#define IGU_PF_CONF_SIMD_MODE	  (0x1<<5)  /* simd all ones mode     */
+
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN	   (0x1<<0)  /* function enable        */
+#define IGU_VF_CONF_MSI_MSIX_EN    (0x1<<1)  /* MSI/MSIX enable        */
+#define IGU_VF_CONF_PARENT_MASK    (0x3<<2)  /* Parent PF	       */
+#define IGU_VF_CONF_PARENT_SHIFT   2	     /* Parent PF	       */
+#define IGU_VF_CONF_SINGLE_ISR_EN  (0x1<<4)  /* single ISR mode enable */
+
+
+#define IGU_BC_DSB_NUM_SEGS    5
+#define IGU_BC_NDSB_NUM_SEGS   2
+#define IGU_NORM_DSB_NUM_SEGS  2
+#define IGU_NORM_NDSB_NUM_SEGS 1
+#define IGU_BC_BASE_DSB_PROD   128
+#define IGU_NORM_BASE_DSB_PROD 136
+
+	/* FID (if VF - [6] = 0; [5:0] = VF number; if PF - [6] = 1; \
+	[5:2] = 0; [1:0] = PF number) */
+#define IGU_FID_ENCODE_IS_PF	    (0x1<<6)
+#define IGU_FID_ENCODE_IS_PF_SHIFT  6
+#define IGU_FID_VF_NUM_MASK	    (0x3f)
+#define IGU_FID_PF_NUM_MASK	    (0x7)
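+
+/*
+ * For illustration (not in the original source), decoding an encoded FID
+ * per the comment above:
+ *
+ *	if (fid & IGU_FID_ENCODE_IS_PF)
+ *		pf_num = fid & IGU_FID_PF_NUM_MASK;
+ *	else
+ *		vf_num = fid & IGU_FID_VF_NUM_MASK;
+ */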
+
+#define IGU_REG_MAPPING_MEMORY_VALID		(1<<0)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_MASK	(0x3F<<1)
+#define IGU_REG_MAPPING_MEMORY_VECTOR_SHIFT	1
+#define IGU_REG_MAPPING_MEMORY_FID_MASK	(0x7F<<7)
+#define IGU_REG_MAPPING_MEMORY_FID_SHIFT	7
+
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+
+/**
+ * String-to-compress [31:8] = CID (all 24 bits)
+ * String-to-compress [7:4] = Region
+ * String-to-compress [3:0] = Type
+ */
+#define CDU_VALID_DATA(_cid, _region, _type)\
+	(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+#define CDU_CRC8(_cid, _region, _type)\
+	(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)\
+	(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
+#define CDU_RSRVD_VALUE_TYPE_B(_crc, _type)\
+	(0x80 | (((_type) & 0xf) << 3) | ((_crc) & 0x7))
+#define CDU_RSRVD_INVALIDATE_CONTEXT_VALUE(_val) ((_val) & ~0x80)
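+
+/*
+ * Worked example (illustrative, not part of the original source):
+ * CDU_VALID_DATA(0x123456, 2, 1) evaluates to 0x12345621 - the 24-bit CID
+ * in bits [31:8], the region in bits [7:4] and the type in bits [3:0].
+ */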
+
+/******************************************************************************
+ * Description:
+ *	   Calculates the CRC-8 of a 32-bit word: polynomial 0-1-2-8
+ *	   (x^8 + x^2 + x + 1). Code was translated from Verilog.
+ * Return:
+ *	   The resulting CRC-8 value.
+ *****************************************************************************/
+static inline u8 calc_crc8(u32 data, u8 crc)
+{
+	u8 D[32];
+	u8 NewCRC[8];
+	u8 C[8];
+	u8 crc_res;
+	u8 i;
+
+	/* split the data into 32 bits */
+	for (i = 0; i < 32; i++) {
+		D[i] = (u8)(data & 1);
+		data = data >> 1;
+	}
+
+	/* split the crc into 8 bits */
+	for (i = 0; i < 8; i++) {
+		C[i] = crc & 1;
+		crc = crc >> 1;
+	}
+
+	NewCRC[0] = D[31] ^ D[30] ^ D[28] ^ D[23] ^ D[21] ^ D[19] ^ D[18] ^
+		    D[16] ^ D[14] ^ D[12] ^ D[8] ^ D[7] ^ D[6] ^ D[0] ^ C[4] ^
+		    C[6] ^ C[7];
+	NewCRC[1] = D[30] ^ D[29] ^ D[28] ^ D[24] ^ D[23] ^ D[22] ^ D[21] ^
+		    D[20] ^ D[18] ^ D[17] ^ D[16] ^ D[15] ^ D[14] ^ D[13] ^
+		    D[12] ^ D[9] ^ D[6] ^ D[1] ^ D[0] ^ C[0] ^ C[4] ^ C[5] ^
+		    C[6];
+	NewCRC[2] = D[29] ^ D[28] ^ D[25] ^ D[24] ^ D[22] ^ D[17] ^ D[15] ^
+		    D[13] ^ D[12] ^ D[10] ^ D[8] ^ D[6] ^ D[2] ^ D[1] ^ D[0] ^
+		    C[0] ^ C[1] ^ C[4] ^ C[5];
+	NewCRC[3] = D[30] ^ D[29] ^ D[26] ^ D[25] ^ D[23] ^ D[18] ^ D[16] ^
+		    D[14] ^ D[13] ^ D[11] ^ D[9] ^ D[7] ^ D[3] ^ D[2] ^ D[1] ^
+		    C[1] ^ C[2] ^ C[5] ^ C[6];
+	NewCRC[4] = D[31] ^ D[30] ^ D[27] ^ D[26] ^ D[24] ^ D[19] ^ D[17] ^
+		    D[15] ^ D[14] ^ D[12] ^ D[10] ^ D[8] ^ D[4] ^ D[3] ^ D[2] ^
+		    C[0] ^ C[2] ^ C[3] ^ C[6] ^ C[7];
+	NewCRC[5] = D[31] ^ D[28] ^ D[27] ^ D[25] ^ D[20] ^ D[18] ^ D[16] ^
+		    D[15] ^ D[13] ^ D[11] ^ D[9] ^ D[5] ^ D[4] ^ D[3] ^ C[1] ^
+		    C[3] ^ C[4] ^ C[7];
+	NewCRC[6] = D[29] ^ D[28] ^ D[26] ^ D[21] ^ D[19] ^ D[17] ^ D[16] ^
+		    D[14] ^ D[12] ^ D[10] ^ D[6] ^ D[5] ^ D[4] ^ C[2] ^ C[4] ^
+		    C[5];
+	NewCRC[7] = D[30] ^ D[29] ^ D[27] ^ D[22] ^ D[20] ^ D[18] ^ D[17] ^
+		    D[15] ^ D[13] ^ D[11] ^ D[7] ^ D[6] ^ D[5] ^ C[3] ^ C[5] ^
+		    C[6];
+
+	crc_res = 0;
+	for (i = 0; i < 8; i++)
+		crc_res |= (NewCRC[i] << i);
+
+	return crc_res;
+}
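+
+/*
+ * Usage sketch (illustrative): the CDU macros above chain through this
+ * helper, e.g.
+ *
+ *	u8 crc = calc_crc8(CDU_VALID_DATA(cid, region, type), 0xff);
+ *
+ * which is exactly what CDU_CRC8() expands to.
+ */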
+
+
+#endif /* BNX2X_REG_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
new file mode 100644
index 0000000..df52f11
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -0,0 +1,5692 @@
+/* bnx2x_sp.c: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#include <linux/module.h>
+#include <linux/crc32.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/crc32c.h>
+#include "bnx2x.h"
+#include "bnx2x_cmn.h"
+#include "bnx2x_sp.h"
+
+#define BNX2X_MAX_EMUL_MULTI		16
+
+/**** Exe Queue interfaces ****/
+
+/**
+ * bnx2x_exe_queue_init - init the Exe Queue object
+ *
+ * @o:		pointer to the object
+ * @exe_len:	length of an execution chunk
+ * @owner:	pointer to the owner
+ * @validate:	validate function pointer
+ * @optimize:	optimize function pointer
+ * @exec:	execute function pointer
+ * @get:	get function pointer
+ */
+static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
+					struct bnx2x_exe_queue_obj *o,
+					int exe_len,
+					union bnx2x_qable_obj *owner,
+					exe_q_validate validate,
+					exe_q_optimize optimize,
+					exe_q_execute exec,
+					exe_q_get get)
+{
+	memset(o, 0, sizeof(*o));
+
+	INIT_LIST_HEAD(&o->exe_queue);
+	INIT_LIST_HEAD(&o->pending_comp);
+
+	spin_lock_init(&o->lock);
+
+	o->exe_chunk_len = exe_len;
+	o->owner         = owner;
+
+	/* Owner specific callbacks */
+	o->validate      = validate;
+	o->optimize      = optimize;
+	o->execute       = exec;
+	o->get           = get;
+
+	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
+			 "length of %d\n", exe_len);
+}
+
+static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
+					     struct bnx2x_exeq_elem *elem)
+{
+	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
+	kfree(elem);
+}
+
+static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+	int cnt = 0;
+
+	spin_lock_bh(&o->lock);
+
+	list_for_each_entry(elem, &o->exe_queue, link)
+		cnt++;
+
+	spin_unlock_bh(&o->lock);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_exe_queue_add - add a new element to the execution queue
+ *
+ * @bp:		driver handle
+ * @o:		queue
+ * @cmd:	new command to add
+ * @restore:	true - do not optimize the command
+ *
+ * If the element is optimized or is illegal, frees it.
+ */
+static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
+				      struct bnx2x_exe_queue_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      bool restore)
+{
+	int rc;
+
+	spin_lock_bh(&o->lock);
+
+	if (!restore) {
+		/* Try to cancel this element against what is already queued */
+		rc = o->optimize(bp, o->owner, elem);
+		if (rc)
+			goto free_and_exit;
+
+		/* Check if this request is ok */
+		rc = o->validate(bp, o->owner, elem);
+		if (rc) {
+			BNX2X_ERR("Preamble failed: %d\n", rc);
+			goto free_and_exit;
+		}
+	}
+
+	/* Everything is ok, add it to the execution queue */
+	list_add_tail(&elem->link, &o->exe_queue);
+
+	spin_unlock_bh(&o->lock);
+
+	return 0;
+
+free_and_exit:
+	bnx2x_exe_queue_free_elem(bp, elem);
+
+	spin_unlock_bh(&o->lock);
+
+	return rc;
+
+}
+
+static inline void __bnx2x_exe_queue_reset_pending(
+	struct bnx2x *bp,
+	struct bnx2x_exe_queue_obj *o)
+{
+	struct bnx2x_exeq_elem *elem;
+
+	while (!list_empty(&o->pending_comp)) {
+		elem = list_first_entry(&o->pending_comp,
+					struct bnx2x_exeq_elem, link);
+
+		list_del(&elem->link);
+		bnx2x_exe_queue_free_elem(bp, elem);
+	}
+}
+
+static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
+						 struct bnx2x_exe_queue_obj *o)
+{
+
+	spin_lock_bh(&o->lock);
+
+	__bnx2x_exe_queue_reset_pending(bp, o);
+
+	spin_unlock_bh(&o->lock);
+
+}
+
+/**
+ * bnx2x_exe_queue_step - execute one execution chunk atomically
+ *
+ * @bp:			driver handle
+ * @o:			queue
+ * @ramrod_flags:	flags
+ *
+ * (Atomicity is ensured using the exe_queue->lock).
+ */
+static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
+				       struct bnx2x_exe_queue_obj *o,
+				       unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem, spacer;
+	int cur_len = 0, rc;
+
+	memset(&spacer, 0, sizeof(spacer));
+
+	spin_lock_bh(&o->lock);
+
+	/*
+	 * Next step should not be performed until the current is finished,
+	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
+	 * properly clear object internals without sending any command to the FW
+	 * which also implies there won't be any completion to clear the
+	 * 'pending' list.
+	 */
+	if (!list_empty(&o->pending_comp)) {
+		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
+			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+					 "resetting pending_comp\n");
+			__bnx2x_exe_queue_reset_pending(bp, o);
+		} else {
+			spin_unlock_bh(&o->lock);
+			return 1;
+		}
+	}
+
+	/*
+	 * Run through the pending commands list and create the next
+	 * execution chunk.
+	 */
+	while (!list_empty(&o->exe_queue)) {
+		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
+					link);
+		WARN_ON(!elem->cmd_len);
+
+		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
+			cur_len += elem->cmd_len;
+			/*
+			 * Prevent both lists from being empty when moving an
+			 * element. This will allow the call of
+			 * bnx2x_exe_queue_empty() without locking.
+			 */
+			list_add_tail(&spacer.link, &o->pending_comp);
+			mb();
+			list_del(&elem->link);
+			list_add_tail(&elem->link, &o->pending_comp);
+			list_del(&spacer.link);
+		} else
+			break;
+	}
+
+	/* Sanity check */
+	if (!cur_len) {
+		spin_unlock_bh(&o->lock);
+		return 0;
+	}
+
+	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
+	if (rc < 0)
+		/*
+		 * In case of an error, put the commands back on the queue
+		 * and reset pending_comp.
+		 */
+		list_splice_init(&o->pending_comp, &o->exe_queue);
+	else if (!rc)
+		/*
+		 * If zero is returned, it means there are no outstanding pending
+		 * completions and we may dismiss the pending list.
+		 */
+		__bnx2x_exe_queue_reset_pending(bp, o);
+
+	spin_unlock_bh(&o->lock);
+	return rc;
+}
+
+static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
+{
+	bool empty = list_empty(&o->exe_queue);
+
+	/* Don't reorder: exe_queue must be read before pending_comp (this
+	 * pairs with the spacer element in bnx2x_exe_queue_step()).
+	 */
+	mb();
+
+	return empty && list_empty(&o->pending_comp);
+}
+
+static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
+	struct bnx2x *bp)
+{
+	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
+	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
+}
+
+/************************ raw_obj functions ***********************************/
+static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
+{
+	return !!test_bit(o->state, o->pstate);
+}
+
+static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
+
+static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
+{
+	smp_mb__before_clear_bit();
+	set_bit(o->state, o->pstate);
+	smp_mb__after_clear_bit();
+}
+
+/**
+ * bnx2x_state_wait - wait until the given bit(state) is cleared
+ *
+ * @bp:		device handle
+ * @state:	state which is to be cleared
+ * @pstate:	pointer to the state buffer
+ *
+ */
+static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
+				   unsigned long *pstate)
+{
+	/* can take a while if any port is running */
+	int cnt = 5000;
+
+
+	if (CHIP_REV_IS_EMUL(bp))
+		cnt *= 20;
+
+	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
+
+	might_sleep();
+	while (cnt--) {
+		if (!test_bit(state, pstate)) {
+#ifdef BNX2X_STOP_ON_ERROR
+			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
+#endif
+			return 0;
+		}
+
+		usleep_range(1000, 1000);
+
+		if (bp->panic)
+			return -EIO;
+	}
+
+	/* timeout! */
+	BNX2X_ERR("timeout waiting for state %d\n", state);
+#ifdef BNX2X_STOP_ON_ERROR
+	bnx2x_panic();
+#endif
+
+	return -EBUSY;
+}
+
+static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
+{
+	return bnx2x_state_wait(bp, raw->state, raw->pstate);
+}
+
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/* credit handling callbacks */
+static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get_entry(mp, offset);
+}
+
+static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	WARN_ON(!mp);
+
+	return mp->get(mp, 1);
+}
+
+static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get_entry(vp, offset);
+}
+
+static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	WARN_ON(!vp);
+
+	return vp->get(vp, 1);
+}
+
+static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->get(mp, 1))
+		return false;
+
+	if (!vp->get(vp, 1)) {
+		mp->put(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put_entry(mp, offset);
+}
+
+static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+
+	return mp->put(mp, 1);
+}
+
+static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put_entry(vp, offset);
+}
+
+static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	return vp->put(vp, 1);
+}
+
+static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
+	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
+
+	if (!mp->put(mp, 1))
+		return false;
+
+	if (!vp->put(vp, 1)) {
+		mp->get(mp, 1);
+		return false;
+	}
+
+	return true;
+}
+
+/* check_add() callbacks */
+static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+			       union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	if (!is_valid_ether_addr(data->mac.mac))
+		return -EINVAL;
+
+	/* Check if a requested MAC already exists */
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+				union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return -EEXIST;
+
+	return 0;
+}
+
+static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+				   union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return -EEXIST;
+
+	return 0;
+}
+
+
+/* check_del() callbacks */
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+			    union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if (data->vlan.vlan == pos->u.vlan.vlan)
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_vlan_mac_registry_elem *
+	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+				 union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+
+	list_for_each_entry(pos, &o->head, link)
+		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
+		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
+			     ETH_ALEN)))
+			return pos;
+
+	return NULL;
+}
+
+/* check_move() callback */
+static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+			     struct bnx2x_vlan_mac_obj *dst_o,
+			     union bnx2x_classification_ramrod_data *data)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	int rc;
+
+	/* Check if we can delete the requested configuration from the first
+	 * object.
+	 */
+	pos = src_o->check_del(src_o, data);
+
+	/* Check if the configuration can be added */
+	rc = dst_o->check_add(dst_o, data);
+
+	/* If this classification can not be added (is already set)
+	 * or can't be deleted - return an error.
+	 */
+	if (rc || !pos)
+		return false;
+
+	return true;
+}
+
+static bool bnx2x_check_move_always_err(
+	struct bnx2x_vlan_mac_obj *src_o,
+	struct bnx2x_vlan_mac_obj *dst_o,
+	union bnx2x_classification_ramrod_data *data)
+{
+	return false;
+}
+
+
+static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
+
+	return rx_tx_flag;
+}
+
+/* LLH CAM line allocations */
+enum {
+	LLH_CAM_ISCSI_ETH_LINE = 0,
+	LLH_CAM_ETH_LINE,
+	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+				 bool add, unsigned char *dev_addr, int index)
+{
+	u32 wb_data[2];
+	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+			 NIG_REG_LLH0_FUNC_MEM;
+
+	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+		return;
+
+	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
+			 (add ? "ADD" : "DELETE"), index);
+
+	if (add) {
+		/* LLH_FUNC_MEM is a u64 WB register */
+		reg_offset += 8*index;
+
+		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+			      (dev_addr[4] <<  8) |  dev_addr[5]);
+		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
+
+		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
+	}
+
+	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
+}
+
+/**
+ * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
+ *
+ * @bp:		device handle
+ * @o:		queue for which we want to configure this rule
+ * @add:	if true the command is an ADD command, DEL otherwise
+ * @opcode:	CLASSIFY_RULE_OPCODE_XXX
+ * @hdr:	pointer to a header to setup
+ *
+ */
+static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
+	struct eth_classify_cmd_header *hdr)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	hdr->client_id = raw->cl_id;
+	hdr->func_id = raw->func_id;
+
+	/* Rx and/or Tx (internal switching) configuration? */
+	hdr->cmd_general_data |=
+		bnx2x_vlan_mac_get_rx_tx_flag(o);
+
+	if (add)
+		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
+
+	hdr->cmd_general_data |=
+		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
+}
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
+ *
+ * @cid:	connection id
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @hdr:	pointer to the header to setup
+ * @rule_cnt:	number of rules in the ramrod data
+ *
+ * Currently we always configure one rule and set the echo field to contain
+ * a CID and an opcode type.
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
+				struct eth_classify_header *hdr, int rule_cnt)
+{
+	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
+	hdr->rule_cnt = (u8)rule_cnt;
+}
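+
+/*
+ * Illustrative sketch (not from the original source): a completion handler
+ * can recover both fields from the echo packed above, e.g.
+ *
+ *	u32 cid  = hdr->echo & BNX2X_SWCID_MASK;
+ *	int type = hdr->echo >> BNX2X_SWCID_SHIFT;
+ */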
+
+
+/* hw_config() callbacks */
+static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
+				 struct bnx2x_vlan_mac_obj *o,
+				 struct bnx2x_exeq_elem *elem, int rule_idx,
+				 int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
+	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
+
+	/*
+	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
+	 * relevant. In addition, current implementation is tuned for a
+	 * single ETH MAC.
+	 *
+	 * When a PF configuration with multiple unicast ETH MACs in
+	 * switch independent mode is required (NetQ, multiple netdev
+	 * MACs, etc.), consider better utilisation of the 8 per-function
+	 * MAC entries in the LLH register. There are also
+	 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that bring the total
+	 * number of CAM entries to 16.
+	 *
+	 * Currently we won't configure NIG for MACs other than a primary ETH
+	 * MAC and iSCSI L2 MAC.
+	 *
+	 * If this MAC is moving from one Queue to another, no need to change
+	 * NIG configuration.
+	 */
+	if (cmd != BNX2X_VLAN_MAC_MOVE) {
+		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac,
+					     LLH_CAM_ISCSI_ETH_LINE);
+		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
+			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
+	}
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Setup a command header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
+				      &rule_entry->mac.header);
+
+	DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
+			 "Queue %d\n", (add ? "add" : "delete"),
+			 BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
+
+	/* Set a MAC itself */
+	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+			      &rule_entry->mac.mac_mid,
+			      &rule_entry->mac.mac_lsb, mac);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_MAC,
+					      &rule_entry->mac.header);
+
+		/* Set a MAC itself */
+		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
+				      &rule_entry->mac.mac_mid,
+				      &rule_entry->mac.mac_lsb, mac);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: take this to a higher level in order to prevent multiple
+		 writes */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
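+
+/*
+ * Note (illustrative): a MOVE command thus occupies two consecutive rules
+ * in the ramrod data - rule[idx] removes the MAC from the source Queue
+ * (add == false) and rule[idx + 1] adds it to the target Queue - which is
+ * why bnx2x_execute_vlan_mac() below advances the rule index by 2 for
+ * MOVE commands.
+ */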
+
+/**
+ * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
+ *
+ * @bp:		device handle
+ * @o:		queue
+ * @type:	BNX2X_FILTER_XXX_PENDING
+ * @cam_offset:	offset in cam memory
+ * @hdr:	pointer to a header to setup
+ *
+ * E1/E1H
+ */
+static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
+	struct mac_configuration_hdr *hdr)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	hdr->length = 1;
+	hdr->offset = (u8)cam_offset;
+	hdr->client_id = 0xff;
+	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
+}
+
+static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
+	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	u32 cl_bit_vec = (1 << r->cl_id);
+
+	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
+	cfg_entry->pf_id = r->func_id;
+	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
+
+	if (add) {
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+		SET_FLAG(cfg_entry->flags,
+			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
+
+		/* Set a MAC in a ramrod data */
+		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
+				      &cfg_entry->middle_mac_addr,
+				      &cfg_entry->lsb_mac_addr, mac);
+	} else
+		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+}
+
+static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
+	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
+{
+	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
+					 &config->hdr);
+	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
+					 cfg_entry);
+
+	DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
+			 (add ? "setting" : "clearing"),
+			 BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
+}
+
+/**
+ * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset: cam_offset
+ */
+static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/*
+	 * 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
+				     ETH_VLAN_FILTER_ANY_VLAN, config);
+}
+
+static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  struct bnx2x_exeq_elem *elem, int rule_idx,
+				  int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
+				      &rule_entry->vlan.header);
+
+	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
+			 vlan);
+
+	/* Set a VLAN itself */
+	rule_entry->vlan.vlan = cpu_to_le16(vlan);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_VLAN,
+					      &rule_entry->vlan.header);
+
+		/* Set a VLAN itself */
+		rule_entry->vlan.vlan = cpu_to_le16(vlan);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: take this to a higher level in order to prevent multiple
+		 writes */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
+				      struct bnx2x_vlan_mac_obj *o,
+				      struct bnx2x_exeq_elem *elem,
+				      int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct eth_classify_rules_ramrod_data *data =
+		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
+	int rule_cnt = rule_idx + 1;
+	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
+	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
+	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
+
+	/* Reset the ramrod data buffer for the first rule */
+	if (rule_idx == 0)
+		memset(data, 0, sizeof(*data));
+
+	/* Set a rule header */
+	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
+				      &rule_entry->pair.header);
+
+	/* Set VLAN and MAC themselves */
+	rule_entry->pair.vlan = cpu_to_le16(vlan);
+	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+			      &rule_entry->pair.mac_mid,
+			      &rule_entry->pair.mac_lsb, mac);
+
+	/* MOVE: Add a rule that will add this MAC to the target Queue */
+	if (cmd == BNX2X_VLAN_MAC_MOVE) {
+		rule_entry++;
+		rule_cnt++;
+
+		/* Setup ramrod data */
+		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
+					elem->cmd_data.vlan_mac.target_obj,
+					      true, CLASSIFY_RULE_OPCODE_PAIR,
+					      &rule_entry->pair.header);
+
+		/* Set a VLAN itself */
+		rule_entry->pair.vlan = cpu_to_le16(vlan);
+		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
+				      &rule_entry->pair.mac_mid,
+				      &rule_entry->pair.mac_lsb, mac);
+	}
+
+	/* Set the ramrod data header */
+	/* TODO: take this to a higher level in order to prevent multiple
+		 writes */
+	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
+					rule_cnt);
+}
+
+/**
+ * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @elem:	bnx2x_exeq_elem
+ * @rule_idx:	rule_idx
+ * @cam_offset:	cam_offset
+ */
+static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
+				       struct bnx2x_vlan_mac_obj *o,
+				       struct bnx2x_exeq_elem *elem,
+				       int rule_idx, int cam_offset)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *config =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	/*
+	 * 57710 and 57711 do not support MOVE command,
+	 * so it's either ADD or DEL
+	 */
+	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+		true : false;
+
+	/* Reset the ramrod data buffer */
+	memset(config, 0, sizeof(*config));
+
+	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
+				     cam_offset, add,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
+				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
+				     ETH_VLAN_FILTER_CLASSIFY, config);
+}
+
+#define list_next_entry(pos, member) \
+	list_entry((pos)->member.next, typeof(*(pos)), member)
+
+/**
+ * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
+ *
+ * @bp:		device handle
+ * @p:		command parameters
+ * @ppos:	pointer to the cookie
+ *
+ * Reconfigures the next MAC/VLAN/VLAN-MAC element from the
+ * previously configured elements list.
+ *
+ * Of the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags is
+ * taken into account.
+ *
+ * The cookie should be given back in the next call to make the function handle
+ * the next element. If *ppos is set to NULL it will restart the iterator.
+ * If the returned *ppos == NULL, the last element has been handled.
+ *
+ */
+static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
+			   struct bnx2x_vlan_mac_ramrod_params *p,
+			   struct bnx2x_vlan_mac_registry_elem **ppos)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+
+	/* If list is empty - there is nothing to do here */
+	if (list_empty(&o->head)) {
+		*ppos = NULL;
+		return 0;
+	}
+
+	/* make a step... */
+	if (*ppos == NULL)
+		*ppos = list_first_entry(&o->head,
+					 struct bnx2x_vlan_mac_registry_elem,
+					 link);
+	else
+		*ppos = list_next_entry(*ppos, link);
+
+	pos = *ppos;
+
+	/* If it's the last step - return NULL */
+	if (list_is_last(&pos->link, &o->head))
+		*ppos = NULL;
+
+	/* Prepare a 'user_req' */
+	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
+
+	/* Set the command */
+	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
+
+	/* Set vlan_mac_flags */
+	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
+
+	/* Set a restore bit */
+	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, p);
+}
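+
+/*
+ * A sketch of the intended calling pattern (assuming RAMROD_COMP_WAIT is
+ * set in p->ramrod_flags, so that each element completes before the next
+ * call):
+ *
+ *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+ *	int rc;
+ *
+ *	do {
+ *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
+ *		if (rc < 0)
+ *			return rc;
+ *	} while (pos);
+ */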
+
+/*
+ * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
+ * pointer to an element with a specific criteria and NULL if such an element
+ * hasn't been found.
+ */
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
+	struct bnx2x_exe_queue_obj *o,
+	struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem *pos;
+	struct bnx2x_vlan_mac_ramrod_data *data =
+		&elem->cmd_data.vlan_mac.u.vlan_mac;
+
+	/* Check pending for execution commands */
+	list_for_each_entry(pos, &o->exe_queue, link)
+		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
+			      sizeof(*data)) &&
+		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
+			return pos;
+
+	return NULL;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ *
+ * Checks that the requested configuration can be added. If yes and if
+ * requested, consume CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ *
+ */
+static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	int rc;
+
+	/* Check the registry */
+	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
+	if (rc) {
+		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
+				 "current registry state\n");
+		return rc;
+	}
+
+	/*
+	 * Check if there is a pending ADD command for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
+	 */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
+		return -EEXIST;
+	}
+
+	/*
+	 * TODO: Check the pending MOVE from other objects where this
+	 * object is a destination object.
+	 */
+
+	/* Consume the credit, unless explicitly requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->get_credit(o)))
+		return -EINVAL;
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		queueable object to check
+ * @elem:	element that needs to be deleted
+ *
+ * Checks that the requested configuration can be deleted. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
+					      union bnx2x_qable_obj *qo,
+					      struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_registry_elem *pos;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem query_elem;
+
+	/* If this classification can not be deleted (doesn't exist),
+	 * return -EEXIST.
+	 */
+	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+	if (!pos) {
+		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
+				 "current registry state\n");
+		return -EEXIST;
+	}
+
+	/*
+	 * Check if there are pending DEL or MOVE commands for this
+	 * MAC/VLAN/VLAN-MAC. Return an error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check for MOVE commands */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
+	if (exeq->get(exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending MOVE command already\n");
+		return -EINVAL;
+	}
+
+	/* Check for DEL commands */
+	if (exeq->get(exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
+		return -EEXIST;
+	}
+
+	/* Return the credit to the credit pool, unless requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    o->put_credit(o))) {
+		BNX2X_ERR("Failed to return a credit\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
+ *
+ * @bp:		device handle
+ * @qo:		queueable object to check (source)
+ * @elem:	element that needs to be moved
+ *
+ * Checks that the requested configuration can be moved. If yes and if
+ * requested, returns a CAM credit.
+ *
+ * The 'validate' is run after the 'optimize'.
+ */
+static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
+					       union bnx2x_qable_obj *qo,
+					       struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
+	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
+	struct bnx2x_exeq_elem query_elem;
+	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
+	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
+
+	/*
+	 * Check if we can perform this operation based on the current registry
+	 * state.
+	 */
+	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
+		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
+				 "current registry state\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Check if there is an already pending DEL or MOVE command for the
+	 * source object or ADD command for a destination object. Return an
+	 * error if so.
+	 */
+	memcpy(&query_elem, elem, sizeof(query_elem));
+
+	/* Check DEL on source */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+	if (src_exeq->get(src_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending DEL command on the source "
+			  "queue already\n");
+		return -EINVAL;
+	}
+
+	/* Check MOVE on source */
+	if (src_exeq->get(src_exeq, elem)) {
+		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
+		return -EEXIST;
+	}
+
+	/* Check ADD on destination */
+	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+	if (dest_exeq->get(dest_exeq, &query_elem)) {
+		BNX2X_ERR("There is a pending ADD command on the "
+			  "destination queue already\n");
+		return -EINVAL;
+	}
+
+	/* Consume the credit, unless requested not to */
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    dest_o->get_credit(dest_o)))
+		return -EINVAL;
+
+	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
+	    src_o->put_credit(src_o))) {
+		/* return the credit taken from dest... */
+		dest_o->put_credit(dest_o);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
+	case BNX2X_VLAN_MAC_DEL:
+		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
+	case BNX2X_VLAN_MAC_MOVE:
+		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ *
+ */
+static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
+			       struct bnx2x_vlan_mac_obj *o)
+{
+	int cnt = 5000, rc;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	while (cnt--) {
+		/* Wait for the current command to complete */
+		rc = raw->wait_comp(bp, raw);
+		if (rc)
+			return rc;
+
+		/* Wait until there are no pending commands */
+		if (!bnx2x_exe_queue_empty(exeq))
+			usleep_range(1000, 1000);
+		else
+			return 0;
+	}
+
+	return -EBUSY;
+}
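+
+/*
+ * The 5 second bound in the comment above follows from the loop: at most
+ * 5000 iterations with a ~1 ms sleep between exe-queue emptiness checks.
+ */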
+
+/**
+ * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
+ *
+ * @bp:		device handle
+ * @o:		bnx2x_vlan_mac_obj
+ * @cqe:	completion element
+ * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
+ *
+ */
+static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
+				   struct bnx2x_vlan_mac_obj *o,
+				   union event_ring_elem *cqe,
+				   unsigned long *ramrod_flags)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc;
+
+	/* Reset pending list */
+	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
+
+	/* Clear pending */
+	r->clear_pending(r);
+
+	/* If ramrod failed this is most likely a SW bug */
+	if (cqe->message.error)
+		return -EINVAL;
+
+	/* Run the next bulk of pending commands if requested */
+	if (test_bit(RAMROD_CONT, ramrod_flags)) {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/* If there is more work to do return PENDING */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		return 1;
+
+	return 0;
+}
+
+/**
+ * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
+ *
+ * @bp:		device handle
+ * @qo:		bnx2x_qable_obj
+ * @elem:	bnx2x_exeq_elem
+ */
+static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
+				   union bnx2x_qable_obj *qo,
+				   struct bnx2x_exeq_elem *elem)
+{
+	struct bnx2x_exeq_elem query, *pos;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+
+	memcpy(&query, elem, sizeof(query));
+
+	switch (elem->cmd_data.vlan_mac.cmd) {
+	case BNX2X_VLAN_MAC_ADD:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
+		break;
+	case BNX2X_VLAN_MAC_DEL:
+		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
+		break;
+	default:
+		/* Don't handle anything other than ADD or DEL */
+		return 0;
+	}
+
+	/* If we found the appropriate element - delete it */
+	pos = exeq->get(exeq, &query);
+	if (pos) {
+
+		/* Return the credit of the optimized command */
+		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
+			if ((query.cmd_data.vlan_mac.cmd ==
+			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
+				BNX2X_ERR("Failed to return the credit for the "
+					  "optimized ADD command\n");
+				return -EINVAL;
+			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
+				BNX2X_ERR("Failed to recover the credit from "
+					  "the optimized DEL command\n");
+				return -EINVAL;
+			}
+		}
+
+		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
+			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
+			   "ADD" : "DEL");
+
+		list_del(&pos->link);
+		bnx2x_exe_queue_free_elem(bp, pos);
+		return 1;
+	}
+
+	return 0;
+}
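+
+/*
+ * Example of the optimization above (illustrative): if an ADD for MAC X
+ * is still sitting in the execution queue when a DEL for the same MAC X
+ * arrives, the pending ADD is simply removed from the queue and neither
+ * command is sent to the FW; only the CAM credit accounting is adjusted.
+ */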
+
+/**
+ * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
+ *
+ * @bp:	  device handle
+ * @o:		vlan_mac object the element belongs to
+ * @elem:	execution queue element holding the command
+ * @restore:	true if this is a restore flow
+ * @re:	pointer to the returned registry element
+ *
+ * prepare a registry element according to the current command request.
+ */
+static inline int bnx2x_vlan_mac_get_registry_elem(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_obj *o,
+	struct bnx2x_exeq_elem *elem,
+	bool restore,
+	struct bnx2x_vlan_mac_registry_elem **re)
+{
+	int cmd = elem->cmd_data.vlan_mac.cmd;
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+
+	/* Allocate a new registry element if needed. */
+	if (!restore &&
+	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
+		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
+		if (!reg_elem)
+			return -ENOMEM;
+
+		/* Get a new CAM offset */
+		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
+			/*
+			 * This should never happen, because we have checked
+			 * the CAM availability in the 'validate' step.
+			 */
+			WARN_ON(1);
+			kfree(reg_elem);
+			return -EINVAL;
+		}
+
+		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
+
+		/* Set a VLAN-MAC data */
+		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
+			  sizeof(reg_elem->u));
+
+		/* Copy the flags (needed for DEL and RESTORE flows) */
+		reg_elem->vlan_mac_flags =
+			elem->cmd_data.vlan_mac.vlan_mac_flags;
+	} else /* DEL, RESTORE */
+		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+	*re = reg_elem;
+	return 0;
+}
+
+/**
+ * bnx2x_execute_vlan_mac - execute vlan mac command
+ *
+ * @bp:			device handle
+ * @qo:		queueable object
+ * @exe_chunk:	chunk of execution queue elements to configure
+ * @ramrod_flags: RAMROD_XXX flags
+ *
+ * go and send a ramrod!
+ */
+static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
+				  union bnx2x_qable_obj *qo,
+				  struct list_head *exe_chunk,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc, idx = 0;
+	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
+	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
+	struct bnx2x_vlan_mac_registry_elem *reg_elem;
+	int cmd;
+
+	/*
+	 * If DRIVER_ONLY execution is requested, clean up the registry
+	 * and exit. Otherwise send a ramrod to the FW.
+	 */
+	if (!drv_only) {
+		WARN_ON(r->check_pending(r));
+
+		/* Set pending */
+		r->set_pending(r);
+
+		/* Fill the ramrod data */
+		list_for_each_entry(elem, exe_chunk, link) {
+			cmd = elem->cmd_data.vlan_mac.cmd;
+			/*
+			 * We will add to the target object in MOVE command, so
+			 * change the object for a CAM search.
+			 */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				cam_obj = elem->cmd_data.vlan_mac.target_obj;
+			else
+				cam_obj = o;
+
+			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
+							      elem, restore,
+							      &reg_elem);
+			if (rc)
+				goto error_exit;
+
+			WARN_ON(!reg_elem);
+
+			/* Push a new entry into the registry */
+			if (!restore &&
+			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+			    (cmd == BNX2X_VLAN_MAC_MOVE)))
+				list_add(&reg_elem->link, &cam_obj->head);
+
+			/* Configure a single command in a ramrod data buffer */
+			o->set_one_rule(bp, o, elem, idx,
+					reg_elem->cam_offset);
+
+			/* MOVE command consumes 2 entries in the ramrod data */
+			if (cmd == BNX2X_VLAN_MAC_MOVE)
+				idx += 2;
+			else
+				idx++;
+		}
+
+		/*
+		 * No need for an explicit memory barrier here: all we have
+		 * to ensure is the ordering of writing to the SPQ element
+		 * with respect to updating the SPQ producer, which involves
+		 * a memory read, and the full memory barrier for that is
+		 * already placed inside bnx2x_sp_post().
+		 */
+
+		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
+				   U64_HI(r->rdata_mapping),
+				   U64_LO(r->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			goto error_exit;
+	}
+
+	/* Now that we are done with the ramrod, clean up the registry */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
+			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+
+			WARN_ON(!reg_elem);
+
+			o->put_cam_offset(o, reg_elem->cam_offset);
+			list_del(&reg_elem->link);
+			kfree(reg_elem);
+		}
+	}
+
+	if (!drv_only)
+		return 1;
+	else
+		return 0;
+
+error_exit:
+	r->clear_pending(r);
+
+	/* Clean up the registry in case of a failure */
+	list_for_each_entry(elem, exe_chunk, link) {
+		cmd = elem->cmd_data.vlan_mac.cmd;
+
+		if (cmd == BNX2X_VLAN_MAC_MOVE)
+			cam_obj = elem->cmd_data.vlan_mac.target_obj;
+		else
+			cam_obj = o;
+
+		/* Delete all the entries newly added above */
+		if (!restore &&
+		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
+		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
+			reg_elem = o->check_del(cam_obj,
+						&elem->cmd_data.vlan_mac.u);
+			if (reg_elem) {
+				list_del(&reg_elem->link);
+				kfree(reg_elem);
+			}
+		}
+	}
+
+	return rc;
+}
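+
+/*
+ * Note on the return convention above: a positive value (1) means a
+ * ramrod was posted and its completion is still pending, 0 means the
+ * request was fully handled in the driver (DRV_CLR_ONLY), and a negative
+ * value is an error code.
+ */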
+
+static inline int bnx2x_vlan_mac_push_new_cmd(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	struct bnx2x_exeq_elem *elem;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
+
+	/* Allocate the execution queue element */
+	elem = bnx2x_exe_queue_alloc_elem(bp);
+	if (!elem)
+		return -ENOMEM;
+
+	/* Set the command 'length' */
+	switch (p->user_req.cmd) {
+	case BNX2X_VLAN_MAC_MOVE:
+		elem->cmd_len = 2;
+		break;
+	default:
+		elem->cmd_len = 1;
+	}
+
+	/* Fill the object specific info */
+	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
+
+	/* Try to add a new command to the pending list */
+	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
+}
+
+/**
+ * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
+ *
+ * @bp:	  device handle
+ * @p:	  command parameters
+ *
+ */
+int bnx2x_config_vlan_mac(
+	struct bnx2x *bp,
+	struct bnx2x_vlan_mac_ramrod_params *p)
+{
+	int rc = 0;
+	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
+	unsigned long *ramrod_flags = &p->ramrod_flags;
+	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
+	struct bnx2x_raw_obj *raw = &o->raw;
+
+	/*
+	 * Add new elements to the execution list for commands that require it.
+	 */
+	if (!cont) {
+		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	/*
+	 * If nothing further will be executed in this iteration, we want
+	 * to return PENDING while there are still pending commands.
+	 */
+	if (!bnx2x_exe_queue_empty(&o->exe_queue))
+		rc = 1;
+
+	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
+		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
+				 "clearing a pending bit.\n");
+		raw->clear_pending(raw);
+	}
+
+	/* Execute commands if required */
+	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
+	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
+		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
+		if (rc < 0)
+			return rc;
+	}
+
+	/*
+	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set,
+	 * the user wants to wait until the last command is done.
+	 */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		/*
+		 * Wait for at most the current exe_queue length plus one
+		 * iterations (the extra one is for the currently pending
+		 * command).
+		 */
+		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
+
+		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
+		       max_iterations--) {
+
+			/* Wait for the current command to complete */
+			rc = raw->wait_comp(bp, raw);
+			if (rc)
+				return rc;
+
+			/* Make a next step */
+			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
+						  ramrod_flags);
+			if (rc < 0)
+				return rc;
+		}
+
+		return 0;
+	}
+
+	return rc;
+}
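+
+/*
+ * Minimal usage sketch (illustrative only, not a real call site): adding
+ * a single MAC and waiting for its completion.
+ *
+ *	struct bnx2x_vlan_mac_ramrod_params p = {0};
+ *	int rc;
+ *
+ *	p.vlan_mac_obj = mac_obj;
+ *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *	rc = bnx2x_config_vlan_mac(bp, &p);
+ *
+ * Here mac_obj and addr are assumed to be a previously initialized
+ * bnx2x_vlan_mac_obj and the MAC address to classify.
+ */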
+
+/**
+ * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
+ *
+ * @bp:			device handle
+ * @o:			vlan_mac object
+ * @vlan_mac_flags:	flags specifying the elements to delete
+ * @ramrod_flags:	execution flags to be used for this deletion
+ *
+ * Returns zero if the last operation has completed successfully and there
+ * are no more elements left, a positive value if the last operation has
+ * completed successfully and there are more previously configured elements,
+ * and a negative value if the current operation has failed.
+ */
+static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
+				  struct bnx2x_vlan_mac_obj *o,
+				  unsigned long *vlan_mac_flags,
+				  unsigned long *ramrod_flags)
+{
+	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
+	int rc = 0;
+	struct bnx2x_vlan_mac_ramrod_params p;
+	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
+	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+
+	/* Clear pending commands first */
+
+	spin_lock_bh(&exeq->lock);
+
+	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
+		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
+		    *vlan_mac_flags)
+			list_del(&exeq_pos->link);
+	}
+
+	spin_unlock_bh(&exeq->lock);
+
+	/* Prepare a command request */
+	memset(&p, 0, sizeof(p));
+	p.vlan_mac_obj = o;
+	p.ramrod_flags = *ramrod_flags;
+	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
+
+	/*
+	 * Add all but the last VLAN-MAC to the execution queue without
+	 * actually executing anything.
+	 */
+	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
+	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	list_for_each_entry(pos, &o->head, link) {
+		if (pos->vlan_mac_flags == *vlan_mac_flags) {
+			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
+			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
+			rc = bnx2x_config_vlan_mac(bp, &p);
+			if (rc < 0) {
+				BNX2X_ERR("Failed to add a new DEL command\n");
+				return rc;
+			}
+		}
+	}
+
+	p.ramrod_flags = *ramrod_flags;
+	__set_bit(RAMROD_CONT, &p.ramrod_flags);
+
+	return bnx2x_config_vlan_mac(bp, &p);
+}
+
+static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
+	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
+	unsigned long *pstate, bnx2x_obj_type type)
+{
+	raw->func_id = func_id;
+	raw->cid = cid;
+	raw->cl_id = cl_id;
+	raw->rdata = rdata;
+	raw->rdata_mapping = rdata_mapping;
+	raw->state = state;
+	raw->pstate = pstate;
+	raw->obj_type = type;
+	raw->check_pending = bnx2x_raw_check_pending;
+	raw->clear_pending = bnx2x_raw_clear_pending;
+	raw->set_pending = bnx2x_raw_set_pending;
+	raw->wait_comp = bnx2x_raw_wait;
+}
+
+static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
+	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
+	int state, unsigned long *pstate, bnx2x_obj_type type,
+	struct bnx2x_credit_pool_obj *macs_pool,
+	struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	INIT_LIST_HEAD(&o->head);
+
+	o->macs_pool = macs_pool;
+	o->vlans_pool = vlans_pool;
+
+	o->delete_all = bnx2x_vlan_mac_del_all;
+	o->restore = bnx2x_vlan_mac_restore;
+	o->complete = bnx2x_complete_vlan_mac;
+	o->wait = bnx2x_wait_vlan_mac;
+
+	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
+			   state, pstate, type);
+}
+
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
+
+	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, NULL);
+
+	/* CAM credit pool handling */
+	mac_obj->get_credit = bnx2x_get_credit_mac;
+	mac_obj->put_credit = bnx2x_put_credit_mac;
+	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1x(bp)) {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move_always_err;
+		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	} else {
+		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
+		mac_obj->check_del         = bnx2x_check_mac_del;
+		mac_obj->check_add         = bnx2x_check_mac_add;
+		mac_obj->check_move        = bnx2x_check_move;
+		mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_mac);
+	}
+}
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type, NULL,
+				   vlans_pool);
+
+	vlan_obj->get_credit = bnx2x_get_credit_vlan;
+	vlan_obj->put_credit = bnx2x_put_credit_vlan;
+	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
+	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
+
+	if (CHIP_IS_E1x(bp)) {
+		BNX2X_ERR("Do not support chips others than E2 and newer\n");
+		BUG();
+	} else {
+		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
+		vlan_obj->check_del         = bnx2x_check_vlan_del;
+		vlan_obj->check_add         = bnx2x_check_vlan_add;
+		vlan_obj->check_move        = bnx2x_check_move;
+		vlan_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan);
+	}
+}
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool)
+{
+	union bnx2x_qable_obj *qable_obj =
+		(union bnx2x_qable_obj *)vlan_mac_obj;
+
+	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
+				   rdata_mapping, state, pstate, type,
+				   macs_pool, vlans_pool);
+
+	/* CAM pool handling */
+	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
+	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
+	/*
+	 * CAM offset is relevant for 57710 and 57711 chips only which have a
+	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
+	 * will be taken from MACs' pool object only.
+	 */
+	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
+	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
+
+	if (CHIP_IS_E1(bp)) {
+		BNX2X_ERR("Do not support chips others than E2\n");
+		BUG();
+	} else if (CHIP_IS_E1H(bp)) {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
+		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue, 1, qable_obj,
+				     bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	} else {
+		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
+		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
+		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
+		vlan_mac_obj->check_move        = bnx2x_check_move;
+		vlan_mac_obj->ramrod_cmd        =
+			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+
+		/* Exe Queue */
+		bnx2x_exe_queue_init(bp,
+				     &vlan_mac_obj->exe_queue,
+				     CLASSIFY_RULES_COUNT,
+				     qable_obj, bnx2x_validate_vlan_mac,
+				     bnx2x_optimize_vlan_mac,
+				     bnx2x_execute_vlan_mac,
+				     bnx2x_exeq_get_vlan_mac);
+	}
+}
+
+/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+static inline void __storm_memset_mac_filters(struct bnx2x *bp,
+			struct tstorm_eth_mac_filter_config *mac_filters,
+			u16 pf_id)
+{
+	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
+
+	u32 addr = BAR_TSTRORM_INTMEM +
+			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
+
+	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
+}
+
+static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
+				 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* update the bp MAC filter structure */
+	u32 mask = (1 << p->cl_id);
+
+	struct tstorm_eth_mac_filter_config *mac_filters =
+		(struct tstorm_eth_mac_filter_config *)p->rdata;
+
+	/* initial setting is drop-all */
+	u8 drop_all_ucast = 1, drop_all_mcast = 1;
+	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
+	u8 unmatched_unicast = 0;
+
+	/* In e1x we take into account only the rx accept flags since tx
+	 * switching isn't enabled.
+	 */
+	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
+		/* accept matched ucast */
+		drop_all_ucast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
+		/* accept matched mcast */
+		drop_all_mcast = 0;
+
+	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
+		/* accept all ucast */
+		drop_all_ucast = 0;
+		accp_all_ucast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
+		/* accept all mcast */
+		drop_all_mcast = 0;
+		accp_all_mcast = 1;
+	}
+	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
+		/* accept (all) bcast */
+		accp_all_bcast = 1;
+	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
+		/* accept unmatched unicasts */
+		unmatched_unicast = 1;
+
+	mac_filters->ucast_drop_all = drop_all_ucast ?
+		mac_filters->ucast_drop_all | mask :
+		mac_filters->ucast_drop_all & ~mask;
+
+	mac_filters->mcast_drop_all = drop_all_mcast ?
+		mac_filters->mcast_drop_all | mask :
+		mac_filters->mcast_drop_all & ~mask;
+
+	mac_filters->ucast_accept_all = accp_all_ucast ?
+		mac_filters->ucast_accept_all | mask :
+		mac_filters->ucast_accept_all & ~mask;
+
+	mac_filters->mcast_accept_all = accp_all_mcast ?
+		mac_filters->mcast_accept_all | mask :
+		mac_filters->mcast_accept_all & ~mask;
+
+	mac_filters->bcast_accept_all = accp_all_bcast ?
+		mac_filters->bcast_accept_all | mask :
+		mac_filters->bcast_accept_all & ~mask;
+
+	mac_filters->unmatched_unicast = unmatched_unicast ?
+		mac_filters->unmatched_unicast | mask :
+		mac_filters->unmatched_unicast & ~mask;
+
+	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
+			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+			 mac_filters->ucast_drop_all,
+			 mac_filters->mcast_drop_all,
+			 mac_filters->ucast_accept_all,
+			 mac_filters->mcast_accept_all,
+			 mac_filters->bcast_accept_all);
+
+	/* write the MAC filter structure */
+	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
+
+	/* The operation is completed */
+	clear_bit(p->state, p->pstate);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+/* Setup ramrod data */
+static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
+				struct eth_classify_header *hdr,
+				u8 rule_cnt)
+{
+	hdr->echo = cid;
+	hdr->rule_cnt = rule_cnt;
+}
+
+static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
+				unsigned long accept_flags,
+				struct eth_filter_rules_cmd *cmd,
+				bool clear_accept_all)
+{
+	u16 state;
+
+	/* start with 'drop-all' */
+	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
+		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+	if (accept_flags) {
+		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
+			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		}
+
+		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
+			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
+		}
+		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
+			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+
+		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
+			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
+			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+		}
+		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
+			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
+	}
+
+	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
+	if (clear_accept_all) {
+		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
+		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
+	}
+
+	cmd->state = cpu_to_le16(state);
+}
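+
+/*
+ * For instance (illustrative), an "accept all" rx_accept_flags set of
+ * {UNICAST, ALL_UNICAST, ALL_MULTICAST, BROADCAST} translates above into
+ * a state with UCAST_ACCEPT_ALL, MCAST_ACCEPT_ALL and BCAST_ACCEPT_ALL
+ * set and with both DROP_ALL bits cleared.
+ */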
+
+static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
+				struct bnx2x_rx_mode_ramrod_params *p)
+{
+	struct eth_filter_rules_ramrod_data *data = p->rdata;
+	int rc;
+	u8 rule_idx = 0;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* Setup ramrod data */
+
+	/* Tx (internal switching) */
+	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_TX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+			&(data->rules[rule_idx++]), false);
+	}
+
+	/* Rx */
+	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+		data->rules[rule_idx].client_id = p->cl_id;
+		data->rules[rule_idx].func_id = p->func_id;
+
+		data->rules[rule_idx].cmd_general_data =
+			ETH_FILTER_RULES_CMD_RX_CMD;
+
+		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+			&(data->rules[rule_idx++]), false);
+	}
+
+	/*
+	 * If FCoE Queue configuration has been requested configure the Rx and
+	 * internal switching modes for this queue in separate rules.
+	 *
+	 * The FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
+	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
+	 */
+	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
+		/*  Tx (internal switching) */
+		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_TX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
+						     &(data->rules[rule_idx++]),
+						       true);
+		}
+
+		/* Rx */
+		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
+			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
+			data->rules[rule_idx].func_id = p->func_id;
+
+			data->rules[rule_idx].cmd_general_data =
+						ETH_FILTER_RULES_CMD_RX_CMD;
+
+			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
+						     &(data->rules[rule_idx++]),
+						       true);
+		}
+	}
+
+	/*
+	 * Set the ramrod header (most importantly - number of rules to
+	 * configure).
+	 */
+	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
+
+	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
+			 "tx_accept_flags 0x%lx\n",
+			 data->header.rule_cnt, p->rx_accept_flags,
+			 p->tx_accept_flags);
+
+	/*
+	 * No need for an explicit memory barrier here: all we have to
+	 * ensure is the ordering of writing to the SPQ element with
+	 * respect to updating the SPQ producer, which involves a memory
+	 * read, and the full memory barrier for that is already placed
+	 * inside bnx2x_sp_post().
+	 */
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
+			   U64_HI(p->rdata_mapping),
+			   U64_LO(p->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+	if (rc)
+		return rc;
+
+	/* Ramrod completion is pending */
+	return 1;
+}
+
+static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
+				      struct bnx2x_rx_mode_ramrod_params *p)
+{
+	return bnx2x_state_wait(bp, p->state, p->pstate);
+}
+
+static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
+				    struct bnx2x_rx_mode_ramrod_params *p)
+{
+	/* Do nothing */
+	return 0;
+}
+
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p)
+{
+	int rc;
+
+	/* Configure the new classification in the chip */
+	rc = p->rx_mode_obj->config_rx_mode(bp, p);
+	if (rc < 0)
+		return rc;
+
+	/* Wait for a ramrod completion if was requested */
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
+		rc = p->rx_mode_obj->wait_comp(bp, p);
+		if (rc)
+			return rc;
+	}
+
+	return rc;
+}
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o)
+{
+	if (CHIP_IS_E1x(bp)) {
+		o->wait_comp      = bnx2x_empty_rx_mode_wait;
+		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
+	} else {
+		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
+		o->config_rx_mode = bnx2x_set_rx_mode_e2;
+	}
+}
+
+/********************* Multicast verbs: SET, CLEAR ****************************/
+static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
+{
+	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
+}
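+
+/*
+ * In other words, multicast MACs are hashed into one of 256 "bins" using
+ * the top byte of their little-endian CRC32C; two MACs whose CRC values
+ * share bits [31:24] land in the same bin and become indistinguishable in
+ * the approximate-match registry.
+ */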
+
+struct bnx2x_mcast_mac_elem {
+	struct list_head link;
+	u8 mac[ETH_ALEN];
+	u8 pad[2]; /* For a natural alignment of the following buffer */
+};
+
+struct bnx2x_pending_mcast_cmd {
+	struct list_head link;
+	int type; /* BNX2X_MCAST_CMD_X */
+	union {
+		struct list_head macs_head;
+		u32 macs_num; /* Needed for DEL command */
+		int next_bin; /* Needed for RESTORE flow with approximate match */
+	} data;
+
+	bool done; /* set to true when the command has been handled. It is
+		    * practically used in 57712 handling only, where one pending
+		    * command may be handled in a few operations. Since for
+		    * other chips every operation handling is completed in a
+		    * single ramrod, there is no need for this field there.
+		    */
+};
+
+static int bnx2x_mcast_wait(struct bnx2x *bp,
+			    struct bnx2x_mcast_obj *o)
+{
+	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
+			o->raw.wait_comp(bp, &o->raw))
+		return -EBUSY;
+
+	return 0;
+}
+
+static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
+				   struct bnx2x_mcast_obj *o,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	int total_sz;
+	struct bnx2x_pending_mcast_cmd *new_cmd;
+	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
+	struct bnx2x_mcast_list_elem *pos;
+	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
+			     p->mcast_list_len : 0);
+
+	/* If the command is empty ("handle pending commands only"), break */
+	if (!p->mcast_list_len)
+		return 0;
+
+	total_sz = sizeof(*new_cmd) +
+		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
+
+	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
+	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
+
+	if (!new_cmd)
+		return -ENOMEM;
+
+	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
+			 "macs_list_len=%d\n", cmd, macs_list_len);
+
+	INIT_LIST_HEAD(&new_cmd->data.macs_head);
+
+	new_cmd->type = cmd;
+	new_cmd->done = false;
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		cur_mac = (struct bnx2x_mcast_mac_elem *)
+			  ((u8 *)new_cmd + sizeof(*new_cmd));
+
+		/* Push the MACs of the current command into the pending command
+		 * MACs list: FIFO
+		 */
+		list_for_each_entry(pos, &p->mcast_list, link) {
+			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
+			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
+			cur_mac++;
+		}
+
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		new_cmd->data.macs_num = p->mcast_list_len;
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		new_cmd->data.next_bin = 0;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* Push the new pending command to the tail of the pending list: FIFO */
+	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
+
+	o->set_sched(o);
+
+	return 1;
+}
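+
+/*
+ * Memory layout note: for an ADD command the bnx2x_mcast_mac_elem array
+ * is carved out of the same kzalloc()ed buffer, right after the
+ * bnx2x_pending_mcast_cmd header - hence the pointer arithmetic on
+ * (u8 *)new_cmd + sizeof(*new_cmd) above and the single kfree() when the
+ * command is completed.
+ */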
+
+/**
+ * bnx2x_mcast_get_next_bin - get the next set bin (index)
+ *
+ * @o:		multicast object
+ * @last:	index to start looking from (including)
+ *
+ * returns the next found (set) bin or a negative value if none is found.
+ */
+static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
+{
+	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
+
+	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
+		if (o->registry.aprox_match.vec[i])
+			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
+				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
+				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
+						       vec, cur_bit)) {
+					return cur_bit;
+				}
+			}
+		inner_start = 0;
+	}
+
+	/* None found */
+	return -1;
+}
+
+/**
+ * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
+ *
+ * @o:		multicast object
+ *
+ * returns the index of the found bin or -1 if none is found
+ */
+static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
+{
+	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
+
+	if (cur_bit >= 0)
+		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
+
+	return cur_bit;
+}
+
+static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	u8 rx_tx_flag = 0;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
+
+	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
+	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
+		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
+
+	return rx_tx_flag;
+}
+
+static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					int cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+	u8 func_id = r->func_id;
+	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
+	int bin;
+
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
+
+	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
+
+	/* Get a bin and update the bins' vector */
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		/* If there were no more bins to clear
+		 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
+		 * clear any (0xff) bin.
+		 * See bnx2x_mcast_validate_e2() for explanation when it may
+		 * happen.
+		 */
+		bin = bnx2x_mcast_clear_first_bin(o);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		bin = cfg_data->bin;
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return;
+	}
+
+	DP(BNX2X_MSG_SP, "%s bin %d\n",
+			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
+			 "Setting"  : "Clearing"), bin);
+
+	data->rules[idx].bin_id    = (u8)bin;
+	data->rules[idx].func_id   = func_id;
+	data->rules[idx].engine_id = o->engine_id;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ * @start_bin:	index in the registry to start from (including)
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * returns last handled bin index or -1 if all bins have been handled
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e2(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_bin,
+	int *rdata_idx)
+{
+	int cur_bin, cnt = *rdata_idx;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	/* go through the registry and configure the bins from it */
+	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
+	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
+
+		cfg_data.bin = (u8)cur_bin;
+		o->set_one_rule(bp, o, cnt, &cfg_data,
+				BNX2X_MCAST_CMD_RESTORE);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*rdata_idx = cnt;
+
+	return cur_bin;
+}
+
+static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
+	int cnt = *line_idx;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
+				 link) {
+
+		cfg_data.mac = &pmac_pos->mac[0];
+		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+
+		list_del(&pmac_pos->link);
+
+		/* Break if we reached the maximum number
+		 * of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* if no more MACs to configure - we are done */
+	if (list_empty(&cmd_pos->data.macs_head))
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	int cnt = *line_idx;
+
+	while (cmd_pos->data.macs_num) {
+		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
+
+		cnt++;
+
+		cmd_pos->data.macs_num--;
+
+		  DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
+				   cmd_pos->data.macs_num, cnt);
+
+		/* Break if we reached the maximum
+		 * number of rules.
+		 */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	*line_idx = cnt;
+
+	/* If we cleared all bins - we are done */
+	if (!cmd_pos->data.macs_num)
+		cmd_pos->done = true;
+}
+
+static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
+	int *line_idx)
+{
+	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
+						line_idx);
+
+	if (cmd_pos->data.next_bin < 0)
+		/* If o->hdl_restore returned -1 we are done */
+		cmd_pos->done = true;
+	else
+		/* Start from the next bin next time */
+		cmd_pos->data.next_bin++;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
+	int cnt = 0;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
+				 link) {
+		switch (cmd_pos->type) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
+							   &cnt);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+			return -EINVAL;
+		}
+
+		/* If the command has been completed - remove it from the list
+		 * and free the memory
+		 */
+		if (cmd_pos->done) {
+			list_del(&cmd_pos->link);
+			kfree(cmd_pos);
+		}
+
+		/* Break if we reached the maximum number of rules */
+		if (cnt >= o->max_cmd_len)
+			break;
+	}
+
+	return cnt;
+}
+
+static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	union bnx2x_mcast_config_data cfg_data = {0};
+	int cnt = *line_idx;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		cfg_data.mac = mlist_pos->mac;
+		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
+	}
+
+	*line_idx = cnt;
+}
+
+static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	int *line_idx)
+{
+	int cnt = *line_idx, i;
+
+	for (i = 0; i < p->mcast_list_len; i++) {
+		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
+
+		cnt++;
+
+		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
+				 p->mcast_list_len - i - 1);
+	}
+
+	*line_idx = cnt;
+}
+
+/**
+ * bnx2x_mcast_handle_current_cmd - handle the current (not pending) command
+ *
+ * @bp:		device handle
+ * @p:		multicast ramrod parameters
+ * @cmd:	command to handle
+ * @start_cnt:	first line in the ramrod data that may be used
+ *
+ * This function is called iff there is enough room for the current command in
+ * the ramrod data.
+ * Returns the total number of lines filled in the ramrod data.
+ */
+static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p, int cmd,
+			int start_cnt)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int cnt = start_cnt;
+
+	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+
+	switch (cmd) {
+	case BNX2X_MCAST_CMD_ADD:
+		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+	}
+
+	/* The current command has been handled */
+	p->mcast_list_len = 0;
+
+	return cnt;
+}
+
+static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		/* Here we set the approximate amount of work to do, which in
+		 * fact may be less, as some MACs in postponed ADD
+		 * command(s) scheduled before this command may fall into
+		 * the same bin and the actual number of bins set in the
+		 * registry would then be smaller than estimated here. See
+		 * bnx2x_mcast_set_one_rule_e2() for further details.
+		 */
+		p->mcast_list_len = reg_sz;
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Here we assume that all new MACs will fall into new bins.
+		 * However we will correct the real registry size after we
+		 * handle all pending commands.
+		 */
+		o->set_registry_size(o, reg_sz + p->mcast_list_len);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+
+	}
+
+	/* Increase the total number of MACs pending to be configured */
+	o->total_pending_num += p->mcast_list_len;
+
+	return 0;
+}
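+
+/* Bookkeeping illustration (illustration only, not driver logic): with 5
+ * bins currently in the registry, a DEL budgets p->mcast_list_len = 5 lines
+ * of work and zeroes the registry size, while an ADD of 3 MACs budgets 3
+ * lines and optimistically grows the registry size to 8 until
+ * bnx2x_mcast_refresh_registry_e2() recounts the real number of set bins.
+ */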
+
+static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_bins)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_bins);
+	o->total_pending_num -= p->mcast_list_len;
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
+ *
+ * @bp:		device handle
+ * @p:		multicast ramrod parameters
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
+
+	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
+			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+	data->header.rule_cnt = len;
+}
+
+/**
+ * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ *
+ * Recalculate the actual number of set bins in the registry using Brian
+ * Kernighan's algorithm: its execution complexity is proportional to the
+ * number of set bins.
+ *
+ * Returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
+ */
+static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	int i, cnt = 0;
+	u64 elem;
+
+	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
+		elem = o->registry.aprox_match.vec[i];
+		for (; elem; cnt++)
+			elem &= elem - 1;
+	}
+
+	o->set_registry_size(o, cnt);
+
+	return 0;
+}
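+
+/* Worked example of the loop above (illustration only): "elem &= elem - 1"
+ * clears the lowest set bit each iteration, so for elem = 0xb (1011b):
+ *
+ *	1011 & 1010 = 1010	(cnt = 1)
+ *	1010 & 1001 = 1000	(cnt = 2)
+ *	1000 & 0111 = 0000	(cnt = 3)
+ *
+ * i.e. exactly three iterations count the three set bins.
+ */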
+
+static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				int cmd)
+{
+	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct eth_multicast_rules_ramrod_data *data =
+		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
+	int cnt = 0, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there was enough room in ramrod
+	 * data for all pending commands and for the current
+	 * command. Otherwise the current command would have been added
+	 * to the pending commands and p->mcast_list_len would have been
+	 * zeroed.
+	 */
+	if (p->mcast_list_len > 0)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
+
+	/* We've pulled out some MACs - update the total number of
+	 * outstanding.
+	 */
+	o->total_pending_num -= cnt;
+
+	/* send a ramrod */
+	WARN_ON(o->total_pending_num < 0);
+	WARN_ON(cnt > o->max_cmd_len);
+
+	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
+
+	/* Update a registry size if there are no more pending operations.
+	 *
+	 * We don't want to change the value of the registry size if there are
+	 * pending operations because we want it to always be equal to the
+	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
+	 * set bins after the last requested operation in order to properly
+	 * evaluate the size of the next DEL/RESTORE operation.
+	 *
+	 * Note that we update the registry itself during command(s) handling
+	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
+	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
+	 * with a limited amount of update commands (per MAC/bin) and we don't
+	 * know in this scope what the actual state of bins configuration is
+	 * going to be after this ramrod.
+	 */
+	if (!o->total_pending_num)
+		bnx2x_mcast_refresh_registry_e2(bp, o);
+
+	/*
+	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* No need for an explicit memory barrier here: the ordering
+		 * of the write to the SPQ element vs. the update of the SPQ
+		 * producer (which involves a memory read) is enforced by the
+		 * full memory barrier inside bnx2x_sp_post().
+		 */
+
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
+				   raw->cid, U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
+				    struct bnx2x_mcast_ramrod_params *p,
+				    int cmd)
+{
+	/* Mark, that there is a work to do */
+	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
+		p->mcast_list_len = 1;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
+				       struct bnx2x_mcast_ramrod_params *p,
+				       int old_num_bins)
+{
+	/* Do nothing */
+}
+
+#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
+do { \
+	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
+} while (0)
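+
+/* Worked example (illustration only): for bit = 37, (bit) >> 5 == 1 and
+ * (bit) & 0x1f == 5, so the macro sets bit 5 of filter[1] - each u32
+ * element of the filter covers 32 consecutive bins.
+ */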
+
+static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
+					   struct bnx2x_mcast_obj *o,
+					   struct bnx2x_mcast_ramrod_params *p,
+					   u32 *mc_filter)
+{
+	struct bnx2x_mcast_list_elem *mlist_pos;
+	int bit;
+
+	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
+		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+
+		DP(BNX2X_MSG_SP, "About to configure "
+				 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
+				 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
+
+		/* bookkeeping... */
+		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
+				  bit);
+	}
+}
+
+static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
+	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
+	u32 *mc_filter)
+{
+	int bit;
+
+	for (bit = bnx2x_mcast_get_next_bin(o, 0);
+	     bit >= 0;
+	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
+		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
+		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
+	}
+}
+
+/* On 57711 we write the multicast MACs' approximate match
+ * table directly into the TSTORM's internal RAM, so no
+ * special tricks are needed to make it work.
+ */
+static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
+				 struct bnx2x_mcast_ramrod_params *p,
+				 int cmd)
+{
+	int i;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* Unless CLEAR_ONLY has been requested, build and write the
+	 * multicast filter; otherwise only the registry is cleared.
+	 */
+	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		u32 mc_filter[MC_HASH_SIZE] = {0};
+
+		/* Set the multicast filter bits before writing it into
+		 * the internal memory.
+		 */
+		switch (cmd) {
+		case BNX2X_MCAST_CMD_ADD:
+			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
+			break;
+
+		case BNX2X_MCAST_CMD_DEL:
+			DP(BNX2X_MSG_SP, "Invalidating multicast "
+					 "MACs configuration\n");
+
+			/* clear the registry */
+			memset(o->registry.aprox_match.vec, 0,
+			       sizeof(o->registry.aprox_match.vec));
+			break;
+
+		case BNX2X_MCAST_CMD_RESTORE:
+			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
+			break;
+
+		default:
+			BNX2X_ERR("Unknown command: %d\n", cmd);
+			return -EINVAL;
+		}
+
+		/* Set the mcast filter in the internal memory */
+		for (i = 0; i < MC_HASH_SIZE; i++)
+			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
+	} else
+		/* clear the registry */
+		memset(o->registry.aprox_match.vec, 0,
+		       sizeof(o->registry.aprox_match.vec));
+
+	/* We are done */
+	r->clear_pending(r);
+
+	return 0;
+}
+
+static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
+				   struct bnx2x_mcast_ramrod_params *p,
+				   int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	int reg_sz = o->get_registry_size(o);
+
+	switch (cmd) {
+	/* DEL command deletes all currently configured MACs */
+	case BNX2X_MCAST_CMD_DEL:
+		o->set_registry_size(o, 0);
+		/* Don't break */
+
+	/* RESTORE command will restore the entire multicast configuration */
+	case BNX2X_MCAST_CMD_RESTORE:
+		p->mcast_list_len = reg_sz;
+		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
+		   cmd, p->mcast_list_len);
+		break;
+
+	case BNX2X_MCAST_CMD_ADD:
+	case BNX2X_MCAST_CMD_CONT:
+		/* Multicast MACs on 57710 are configured as unicast MACs and
+		 * there is only a limited number of CAM entries for that
+		 * matter.
+		 */
+		if (p->mcast_list_len > o->max_cmd_len) {
+			BNX2X_ERR("Can't configure more than %d multicast MACs "
+				  "on 57710\n", o->max_cmd_len);
+			return -EINVAL;
+		}
+		/* Every configured MAC should be cleared if DEL command is
+		 * called. Only the last ADD command is relevant as long as
+		 * every ADD command overrides the previous configuration.
+		 */
+		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
+		if (p->mcast_list_len > 0)
+			o->set_registry_size(o, p->mcast_list_len);
+
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd);
+		return -EINVAL;
+
+	}
+
+	/* We want to ensure that commands are executed one by one for 57710.
+	 * Therefore each non-empty command will consume o->max_cmd_len.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num += o->max_cmd_len;
+
+	return 0;
+}
+
+static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
+				      struct bnx2x_mcast_ramrod_params *p,
+				      int old_num_macs)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+
+	o->set_registry_size(o, old_num_macs);
+
+	/* If the current command hasn't been handled yet, getting here
+	 * means it's meant to be dropped and we have to update the
+	 * number of outstanding MACs accordingly.
+	 */
+	if (p->mcast_list_len)
+		o->total_pending_num -= o->max_cmd_len;
+}
+
+static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_obj *o, int idx,
+					union bnx2x_mcast_config_data *cfg_data,
+					int cmd)
+{
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	/* copy mac */
+	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
+		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
+				      &data->config_table[idx].middle_mac_addr,
+				      &data->config_table[idx].lsb_mac_addr,
+				      cfg_data->mac);
+
+		data->config_table[idx].vlan_id = 0;
+		data->config_table[idx].pf_id = r->func_id;
+		data->config_table[idx].clients_bit_vector =
+			cpu_to_le32(1 << r->cl_id);
+
+		SET_FLAG(data->config_table[idx].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_SET);
+	}
+}
+
+/**
+ * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
+ *
+ * @bp:		device handle
+ * @p:		multicast ramrod parameters
+ * @len:	number of rules to handle
+ */
+static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
+					struct bnx2x_mcast_ramrod_params *p,
+					u8 len)
+{
+	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(r->rdata);
+
+	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
+		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
+		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
+
+	data->hdr.offset = offset;
+	data->hdr.client_id = 0xff;
+	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
+			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
+	data->hdr.length = len;
+}
+
+/**
+ * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ * @start_idx:	index in the registry to start from
+ * @rdata_idx:	index in the ramrod data to start from
+ *
+ * The restore command for 57710 is, like all other commands, always a
+ * stand-alone command - start_idx and rdata_idx will always be 0. This
+ * function will always succeed.
+ * Returns -1 to comply with the 57712 variant.
+ */
+static inline int bnx2x_mcast_handle_restore_cmd_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
+	int *rdata_idx)
+{
+	struct bnx2x_mcast_mac_elem *elem;
+	int i = 0;
+	union bnx2x_mcast_config_data cfg_data = {0};
+
+	/* go through the registry and configure the MACs from it. */
+	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
+		cfg_data.mac = &elem->mac[0];
+		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
+
+		i++;
+
+		DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+				 " mcast MAC\n",
+				 BNX2X_MAC_PRN_LIST(cfg_data.mac));
+	}
+
+	*rdata_idx = i;
+
+	return -1;
+}
+
+static inline int bnx2x_mcast_handle_pending_cmds_e1(
+	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
+{
+	struct bnx2x_pending_mcast_cmd *cmd_pos;
+	struct bnx2x_mcast_mac_elem *pmac_pos;
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	union bnx2x_mcast_config_data cfg_data = {0};
+	int cnt = 0;
+
+	/* If nothing to be done - return */
+	if (list_empty(&o->pending_cmds_head))
+		return 0;
+
+	/* Handle the first command */
+	cmd_pos = list_first_entry(&o->pending_cmds_head,
+				   struct bnx2x_pending_mcast_cmd, link);
+
+	switch (cmd_pos->type) {
+	case BNX2X_MCAST_CMD_ADD:
+		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
+			cfg_data.mac = &pmac_pos->mac[0];
+			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
+
+			cnt++;
+
+			DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
+					 " mcast MAC\n",
+					 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
+		}
+		break;
+
+	case BNX2X_MCAST_CMD_DEL:
+		cnt = cmd_pos->data.macs_num;
+		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
+		break;
+
+	case BNX2X_MCAST_CMD_RESTORE:
+		o->hdl_restore(bp, o, 0, &cnt);
+		break;
+
+	default:
+		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
+		return -EINVAL;
+	}
+
+	list_del(&cmd_pos->link);
+	kfree(cmd_pos);
+
+	return cnt;
+}
+
+/**
+ * bnx2x_get_fw_mac_addr - revert bnx2x_set_fw_mac_addr().
+ *
+ * @fw_hi:	most significant 16 bits of the MAC in FW format
+ * @fw_mid:	middle 16 bits of the MAC in FW format
+ * @fw_lo:	least significant 16 bits of the MAC in FW format
+ * @mac:	buffer to reconstruct the MAC address into
+ */
+static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
+					 __le16 *fw_lo, u8 *mac)
+{
+	mac[1] = ((u8 *)fw_hi)[0];
+	mac[0] = ((u8 *)fw_hi)[1];
+	mac[3] = ((u8 *)fw_mid)[0];
+	mac[2] = ((u8 *)fw_mid)[1];
+	mac[5] = ((u8 *)fw_lo)[0];
+	mac[4] = ((u8 *)fw_lo)[1];
+}
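+
+/* Byte-order illustration (not driver logic): for MAC 00:1b:2c:3d:4e:5f the
+ * FW format produced by bnx2x_set_fw_mac_addr() holds the raw bytes
+ * fw_hi = {0x1b, 0x00}, fw_mid = {0x3d, 0x2c}, fw_lo = {0x5f, 0x4e}, and
+ * the function above undoes that swap: mac[0] is taken from the second
+ * byte of fw_hi, mac[1] from its first, and so on.
+ */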
+
+/**
+ * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
+ *
+ * @bp:		device handle
+ * @o:		multicast object
+ *
+ * Check the first ramrod data entry flag to see if it was a DELETE or an ADD
+ * command and update the registry correspondingly: if ADD - allocate memory
+ * and add the entries to the registry (list), if DELETE - clear the registry
+ * and free the memory.
+ */
+static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
+						  struct bnx2x_mcast_obj *o)
+{
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct bnx2x_mcast_mac_elem *elem;
+	struct mac_configuration_cmd *data =
+			(struct mac_configuration_cmd *)(raw->rdata);
+
+	/* If first entry contains a SET bit - the command was ADD,
+	 * otherwise - DEL_ALL
+	 */
+	if (GET_FLAG(data->config_table[0].flags,
+			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
+		int i, len = data->hdr.length;
+
+		/* Bail out if it was a RESTORE command - the registry
+		 * already contains these MACs.
+		 */
+		if (!list_empty(&o->registry.exact_match.macs))
+			return 0;
+
+		elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+		if (!elem) {
+			BNX2X_ERR("Failed to allocate registry memory\n");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < len; i++, elem++) {
+			bnx2x_get_fw_mac_addr(
+				&data->config_table[i].msb_mac_addr,
+				&data->config_table[i].middle_mac_addr,
+				&data->config_table[i].lsb_mac_addr,
+				elem->mac);
+			DP(BNX2X_MSG_SP, "Adding registry entry for ["
+					 BNX2X_MAC_FMT"]\n",
+				   BNX2X_MAC_PRN_LIST(elem->mac));
+			list_add_tail(&elem->link,
+				      &o->registry.exact_match.macs);
+		}
+	} else {
+		elem = list_first_entry(&o->registry.exact_match.macs,
+					struct bnx2x_mcast_mac_elem, link);
+		DP(BNX2X_MSG_SP, "Deleting a registry\n");
+		kfree(elem);
+		INIT_LIST_HEAD(&o->registry.exact_match.macs);
+	}
+
+	return 0;
+}
+
+static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p,
+				int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *raw = &o->raw;
+	struct mac_configuration_cmd *data =
+		(struct mac_configuration_cmd *)(raw->rdata);
+	int cnt = 0, i, rc;
+
+	/* Reset the ramrod data buffer */
+	memset(data, 0, sizeof(*data));
+
+	/* First set all entries as invalid */
+	for (i = 0; i < o->max_cmd_len; i++)
+		SET_FLAG(data->config_table[i].flags,
+			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
+			 T_ETH_MAC_COMMAND_INVALIDATE);
+
+	/* Handle pending commands first */
+	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
+
+	/* If there are no more pending commands - clear SCHEDULED state */
+	if (list_empty(&o->pending_cmds_head))
+		o->clear_sched(o);
+
+	/* The below may be true iff there were no pending commands */
+	if (!cnt)
+		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
+
+	/* For 57710 every command has o->max_cmd_len length to ensure that
+	 * commands are done one at a time.
+	 */
+	o->total_pending_num -= o->max_cmd_len;
+
+	/* send a ramrod */
+
+	WARN_ON(cnt > o->max_cmd_len);
+
+	/* Set ramrod header (in particular, a number of entries to update) */
+	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
+
+	/* Update the registry: the registry contents must always be up
+	 * to date in order to be able to execute a RESTORE opcode. Here
+	 * we use the fact that for 57710 we send one command at a time,
+	 * hence we may take the registry update out of the command handling
+	 * and do it in a simpler way here.
+	 */
+	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
+	if (rc)
+		return rc;
+
+	/*
+	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
+	 * RAMROD_PENDING status immediately.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+		raw->clear_pending(raw);
+		return 0;
+	} else {
+		/* No need for an explicit memory barrier here: the ordering
+		 * of the write to the SPQ element vs. the update of the SPQ
+		 * producer (which involves a memory read) is enforced by the
+		 * full memory barrier inside bnx2x_sp_post().
+		 */
+
+		/* Send a ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
+				   U64_HI(raw->rdata_mapping),
+				   U64_LO(raw->rdata_mapping),
+				   ETH_CONNECTION_TYPE);
+		if (rc)
+			return rc;
+
+		/* Ramrod completion is pending */
+		return 1;
+	}
+}
+
+static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.exact_match.num_macs_set;
+}
+
+static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
+{
+	return o->registry.aprox_match.num_bins_set;
+}
+
+static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.exact_match.num_macs_set = n;
+}
+
+static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
+						int n)
+{
+	o->registry.aprox_match.num_bins_set = n;
+}
+
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       int cmd)
+{
+	struct bnx2x_mcast_obj *o = p->mcast_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	int rc = 0, old_reg_size;
+
+	/* This is needed to recover number of currently configured mcast macs
+	 * in case of failure.
+	 */
+	old_reg_size = o->get_registry_size(o);
+
+	/* Do some calculations and checks */
+	rc = o->validate(bp, p, cmd);
+	if (rc)
+		return rc;
+
+	/* Return if there is no work to do */
+	if ((!p->mcast_list_len) && (!o->check_sched(o)))
+		return 0;
+
+	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
+			 "o->max_cmd_len=%d\n", o->total_pending_num,
+			 p->mcast_list_len, o->max_cmd_len);
+
+	/* Enqueue the current command to the pending list if we can't complete
+	 * it in the current iteration
+	 */
+	if (r->check_pending(r) ||
+	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
+		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
+		if (rc < 0)
+			goto error_exit1;
+
+		/* As long as the current command is in a command list we
+		 * don't need to handle it separately.
+		 */
+		p->mcast_list_len = 0;
+	}
+
+	if (!r->check_pending(r)) {
+
+		/* Set 'pending' state */
+		r->set_pending(r);
+
+		/* Configure the new classification in the chip */
+		rc = o->config_mcast(bp, p, cmd);
+		if (rc < 0)
+			goto error_exit2;
+
+		/* Wait for a ramrod completion if was requested */
+		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+			rc = o->wait_comp(bp, o);
+	}
+
+	return rc;
+
+error_exit2:
+	r->clear_pending(r);
+
+error_exit1:
+	o->revert(bp, p, old_reg_size);
+
+	return rc;
+}
+
+static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_clear_bit();
+}
+
+static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
+{
+	smp_mb__before_clear_bit();
+	set_bit(o->sched_state, o->raw.pstate);
+	smp_mb__after_clear_bit();
+}
+
+static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
+{
+	return !!test_bit(o->sched_state, o->raw.pstate);
+}
+
+static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
+{
+	return o->raw.check_pending(&o->raw) || o->check_sched(o);
+}
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate, bnx2x_obj_type type)
+{
+	memset(mcast_obj, 0, sizeof(*mcast_obj));
+
+	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
+			   rdata, rdata_mapping, state, pstate, type);
+
+	mcast_obj->engine_id = engine_id;
+
+	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
+
+	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
+	mcast_obj->check_sched = bnx2x_mcast_check_sched;
+	mcast_obj->set_sched = bnx2x_mcast_set_sched;
+	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
+
+	if (CHIP_IS_E1(bp)) {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e1;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+
+		if (CHIP_REV_IS_SLOW(bp))
+			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
+		else
+			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
+
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_exact;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_exact;
+
+		/* 57710 is the only chip that uses the exact match for mcast
+		 * at the moment.
+		 */
+		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
+		mcast_obj->enqueue_cmd   = NULL;
+		mcast_obj->hdl_restore   = NULL;
+		mcast_obj->check_pending = bnx2x_mcast_check_pending;
+
+		/* 57711 doesn't send a ramrod, so it has unlimited credit
+		 * for one command.
+		 */
+		mcast_obj->max_cmd_len       = -1;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = NULL;
+		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
+		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	} else {
+		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
+		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
+		mcast_obj->hdl_restore       =
+			bnx2x_mcast_handle_restore_cmd_e2;
+		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
+		/* TODO: There should be a proper HSI define for this number! */
+		mcast_obj->max_cmd_len       = 16;
+		mcast_obj->wait_comp         = bnx2x_mcast_wait;
+		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
+		mcast_obj->validate          = bnx2x_mcast_validate_e2;
+		mcast_obj->revert            = bnx2x_mcast_revert_e2;
+		mcast_obj->get_registry_size =
+			bnx2x_mcast_get_registry_size_aprox;
+		mcast_obj->set_registry_size =
+			bnx2x_mcast_set_registry_size_aprox;
+	}
+}
+
+/*************************** Credit handling **********************************/
+
+/**
+ * __atomic_add_ifless - add if the result is less than a given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to add to v...
+ * @u:	...if (v + a) is less than u.
+ *
+ * Returns true if (v + a) was less than u, and false otherwise.
+ */
+static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c + a >= u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c + a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
+
+/**
+ * __atomic_dec_ifmoe - decrement if the result is greater than or equal to
+ * a given value.
+ *
+ * @v:	pointer of type atomic_t
+ * @a:	the amount to subtract from v...
+ * @u:	...if (v - a) is greater than or equal to u.
+ *
+ * Returns true if (v - a) was greater than or equal to u, and false
+ * otherwise.
+ */
+static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
+{
+	int c, old;
+
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c - a < u))
+			return false;
+
+		old = atomic_cmpxchg((v), c, c - a);
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+
+	return true;
+}
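+
+/* Usage sketch for the two helpers above (illustration only): a pool of sz
+ * entries keeps its credit in [0, sz], so taking n entries is
+ * __atomic_dec_ifmoe(&credit, n, 0) - succeeds only while credit - n >= 0 -
+ * and returning them is __atomic_add_ifless(&credit, n, sz + 1) - succeeds
+ * only while credit + n <= sz. Both loop on atomic_cmpxchg() until it
+ * observes an unchanged counter, which makes them safe against concurrent
+ * callers; this is exactly how the pool get()/put() below use them.
+ */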
+
+static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
+	smp_mb();
+
+	return rc;
+}
+
+static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
+{
+	bool rc;
+
+	smp_mb();
+
+	/* Don't allow a refill if credit + cnt > pool_sz */
+	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
+
+	smp_mb();
+
+	return rc;
+}
+
+static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
+{
+	int cur_credit;
+
+	smp_mb();
+	cur_credit = atomic_read(&o->credit);
+
+	return cur_credit;
+}
+
+static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
+					  int cnt)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	int idx, vec, i;
+
+	*offset = -1;
+
+	/* Find "internal cam-offset" then add to base for this object... */
+	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
+
+		/* Skip the current vector if there are no free entries in it */
+		if (!o->pool_mirror[vec])
+			continue;
+
+		/* If we've got here we are going to find a free entry */
+		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
+		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
+
+			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
+				/* Got one!! */
+				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
+				*offset = o->base_pool_offset + idx;
+				return true;
+			}
+	}
+
+	return false;
+}
+
+static bool bnx2x_credit_pool_put_entry(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	if (offset < o->base_pool_offset)
+		return false;
+
+	offset -= o->base_pool_offset;
+
+	if (offset >= o->pool_sz)
+		return false;
+
+	/* Return the entry to the pool */
+	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
+
+	return true;
+}
+
+static bool bnx2x_credit_pool_put_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int offset)
+{
+	return true;
+}
+
+static bool bnx2x_credit_pool_get_entry_always_true(
+	struct bnx2x_credit_pool_obj *o,
+	int *offset)
+{
+	*offset = -1;
+	return true;
+}
+
+/**
+ * bnx2x_init_credit_pool - initialize credit pool internals.
+ *
+ * @p:		credit pool object
+ * @base:	Base entry in the CAM to use.
+ * @credit:	pool size.
+ *
+ * If base is negative no CAM entries handling will be performed.
+ * If credit is negative pool operations will always succeed (unlimited pool).
+ *
+ */
+static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
+					  int base, int credit)
+{
+	/* Zero the object first */
+	memset(p, 0, sizeof(*p));
+
+	/* Set the table to all 1s */
+	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
+
+	/* Init a pool as full */
+	atomic_set(&p->credit, credit);
+
+	/* The total pool size */
+	p->pool_sz = credit;
+
+	p->base_pool_offset = base;
+
+	/* Commit the change */
+	smp_mb();
+
+	p->check = bnx2x_credit_pool_check;
+
+	/* if pool credit is negative - disable the checks */
+	if (credit >= 0) {
+		p->put      = bnx2x_credit_pool_put;
+		p->get      = bnx2x_credit_pool_get;
+		p->put_entry = bnx2x_credit_pool_put_entry;
+		p->get_entry = bnx2x_credit_pool_get_entry;
+	} else {
+		p->put      = bnx2x_credit_pool_always_true;
+		p->get      = bnx2x_credit_pool_always_true;
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+
+	/* If base is negative - disable entries handling */
+	if (base < 0) {
+		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
+		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
+	}
+}
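+
+/* Usage sketch (illustration only): bnx2x_init_credit_pool(p, 10, 4) sets up
+ * a pool of 4 CAM entries at offsets 10..13, where p->get_entry() hands out
+ * the lowest free offset and p->put_entry() returns it. A negative base
+ * (e.g. bnx2x_init_credit_pool(p, -1, 4)) keeps the 4-credit accounting but
+ * turns entry handling into no-ops, and a negative credit makes every pool
+ * operation succeed (an unlimited pool).
+ */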
+
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num)
+{
+/* TODO: this will be defined in consts as well... */
+#define BNX2X_CAM_SIZE_EMUL 5
+
+	int cam_sz;
+
+	if (CHIP_IS_E1(bp)) {
+		/* In E1, multicast MACs are saved in the CAM as well... */
+		if (!CHIP_REV_IS_SLOW(bp))
+			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
+		else
+			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
+
+		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+
+	} else if (CHIP_IS_E1H(bp)) {
+		/* CAM credit is equally divided between all active functions
+		 * on the PORT.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+
+	} else {
+
+		/*
+		 * CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			if (!CHIP_REV_IS_SLOW(bp))
+				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
+			else
+				cam_sz = BNX2X_CAM_SIZE_EMUL;
+
+			/*
+			 * No need for CAM entries handling for 57712 and
+			 * newer.
+			 */
+			bnx2x_init_credit_pool(p, -1, cam_sz);
+		} else {
+			/* this should never happen! Block MAC operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+		}
+	}
+}
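+
+/* Worked example (illustration only): on a 57711 (E1H) ASIC with
+ * func_num = 2, each function gets cam_sz = MAX_MAC_CREDIT_E1H / 4 CAM
+ * entries - the per-chip credit halved for the port and halved again
+ * between the two functions - starting at offset func_id * cam_sz.
+ */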
+
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p,
+				 u8 func_id,
+				 u8 func_num)
+{
+	if (CHIP_IS_E1x(bp)) {
+		/*
+		 * There is no VLAN credit in HW on 57710 and 57711; only
+		 * MAC / MAC-VLAN pairs can be set.
+		 */
+		bnx2x_init_credit_pool(p, 0, -1);
+	} else {
+		/*
+		 * CAM credit is equally divided between all active functions
+		 * on the PATH.
+		 */
+		if (func_num > 0) {
+			int credit = MAX_VLAN_CREDIT_E2 / func_num;
+			bnx2x_init_credit_pool(p, func_id * credit, credit);
+		} else
+			/* this should never happen! Block VLAN operations. */
+			bnx2x_init_credit_pool(p, 0, 0);
+	}
+}
+
+/****************** RSS Configuration ******************/
+/**
+ * bnx2x_debug_print_ind_table - prints the indirection table configuration.
+ *
+ * @bp:		device handle
+ * @p:		pointer to rss configuration
+ *
+ * Prints it when NETIF_MSG_IFUP debug level is configured.
+ */
+static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
+					struct bnx2x_config_rss_params *p)
+{
+	int i;
+
+	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
+	DP(BNX2X_MSG_SP, "0x0000: ");
+	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
+		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
+
+		/* Print 4 bytes in a line */
+		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
+		    (((i + 1) & 0x3) == 0)) {
+			DP_CONT(BNX2X_MSG_SP, "\n");
+			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
+		}
+	}
+
+	DP_CONT(BNX2X_MSG_SP, "\n");
+}
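+
+/* Sample output of the function above (illustration only, first 8 entries):
+ *
+ *	Setting indirection table to:
+ *	0x0000: 0x00 0x01 0x02 0x03
+ *	0x0004: 0x04 0x05 0x06 0x07
+ *	...
+ */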
+
+/**
+ * bnx2x_setup_rss - configure RSS
+ *
+ * @bp:		device handle
+ * @p:		rss configuration
+ *
+ * Sends an RSS update ramrod.
+ */
+static int bnx2x_setup_rss(struct bnx2x *bp,
+			   struct bnx2x_config_rss_params *p)
+{
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+	struct eth_rss_update_ramrod_data *data =
+		(struct eth_rss_update_ramrod_data *)(r->rdata);
+	u8 rss_mode = 0;
+	int rc;
+
+	memset(data, 0, sizeof(*data));
+
+	DP(BNX2X_MSG_SP, "Configuring RSS\n");
+
+	/* Set an echo field */
+	data->echo = (r->cid & BNX2X_SWCID_MASK) |
+		     (r->state << BNX2X_SWCID_SHIFT);
+
+	/* RSS mode */
+	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_DISABLED;
+	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_REGULAR;
+	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_VLAN_PRI;
+	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
+	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
+		rss_mode = ETH_RSS_MODE_IP_DSCP;
+
+	data->rss_mode = rss_mode;
+
+	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
+
+	/* RSS capabilities */
+	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
+
+	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
+		data->capabilities |=
+			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
+
+	/* Hashing mask */
+	data->rss_result_mask = p->rss_result_mask;
+
+	/* RSS engine ID */
+	data->rss_engine_id = o->engine_id;
+
+	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
+
+	/* Indirection table */
+	memcpy(data->indirection_table, p->ind_table,
+		  T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Remember the last configuration */
+	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+
+	/* Print the indirection table */
+	if (netif_msg_ifup(bp))
+		bnx2x_debug_print_ind_table(bp, p);
+
+	/* RSS keys */
+	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
+		memcpy(&data->rss_key[0], &p->rss_key[0],
+		       sizeof(data->rss_key));
+		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
+	}
+
+	/* No need for an explicit memory barrier here: the ordering
+	 * of the write to the SPQ element vs. the update of the SPQ
+	 * producer (which involves a memory read) is enforced by the
+	 * full memory barrier inside bnx2x_sp_post().
+	 */
+
+	/* Send a ramrod */
+	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
+			   U64_HI(r->rdata_mapping),
+			   U64_LO(r->rdata_mapping),
+			   ETH_CONNECTION_TYPE);
+
+	if (rc < 0)
+		return rc;
+
+	return 1;
+}
+
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table)
+{
+	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
+}
+
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p)
+{
+	int rc;
+	struct bnx2x_rss_config_obj *o = p->rss_obj;
+	struct bnx2x_raw_obj *r = &o->raw;
+
+	/* Do nothing if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+		return 0;
+
+	r->set_pending(r);
+
+	rc = o->config_rss(bp, p);
+	if (rc < 0) {
+		r->clear_pending(r);
+		return rc;
+	}
+
+	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
+		rc = r->wait_comp(bp, r);
+
+	return rc;
+}
+
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type)
+{
+	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
+			   rdata_mapping, state, pstate, type);
+
+	rss_obj->engine_id  = engine_id;
+	rss_obj->config_rss = bnx2x_setup_rss;
+}
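+
+/* Usage sketch (illustration only; assumes the caller keeps its RSS object
+ * in bp->rss_conf_obj and an indirection table in ind_table[]):
+ *
+ *	struct bnx2x_config_rss_params params = {0};
+ *
+ *	params.rss_obj = &bp->rss_conf_obj;
+ *	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+ *	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+ *	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ *	rc = bnx2x_config_rss(bp, &params);
+ */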
+
+/********************** Queue state object ***********************************/
+
+/**
+ * bnx2x_queue_state_change - perform Queue state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transition
+ *
+ * Returns 0 in case of a successfully completed transition, negative error
+ * code in case of failure, positive (EBUSY) value if there is a completion
+ * that is still pending (possible only if RAMROD_COMP_WAIT is
+ * not set in params->ramrod_flags for asynchronous commands).
+ *
+ */
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	int rc, pending_bit;
+	unsigned long *pending = &o->pending;
+
+	/* Check that the requested transition is legal */
+	if (o->check_transition(bp, o, params))
+		return -EINVAL;
+
+	/* Set "pending" bit */
+	pending_bit = o->set_pending(o, params);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
+		o->complete_cmd(bp, o, pending_bit);
+	else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+		if (rc) {
+			o->next_state = BNX2X_Q_STATE_MAX;
+			clear_bit(pending_bit, pending);
+			smp_mb__after_clear_bit();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, pending_bit);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
+
+	return !!test_bit(pending_bit, pending);
+}
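+
+/* Usage sketch (illustration only; &fp->q_obj stands for wherever the
+ * caller keeps its queue object): a synchronous EMPTY transition would be
+ *
+ *	struct bnx2x_queue_state_params q_params = {0};
+ *
+ *	q_params.q_obj = &fp->q_obj;
+ *	q_params.cmd = BNX2X_Q_CMD_EMPTY;
+ *	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ *	rc = bnx2x_queue_state_change(bp, &q_params);
+ */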
+
+static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
+				   struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_queue_cmd cmd = params->cmd, bit;
+
+	/* ACTIVATE and DEACTIVATE commands are implemented on top of
+	 * UPDATE command.
+	 */
+	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
+	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
+		bit = BNX2X_Q_CMD_UPDATE;
+	else
+		bit = cmd;
+
+	set_bit(bit, &obj->pending);
+	return bit;
+}
+
+static int bnx2x_queue_wait_comp(struct bnx2x *bp,
+				 struct bnx2x_queue_sp_obj *o,
+				 enum bnx2x_queue_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_queue_comp_cmd - complete the state change command.
+ *
+ * @bp:		device handle
+ * @o:		queue state object
+ * @cmd:	command to complete
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				enum bnx2x_queue_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
+			  "pending 0x%lx, next_state %d\n", cmd,
+			  o->cids[BNX2X_PRIMARY_CID_INDEX],
+			  o->state, cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	if (o->next_tx_only >= o->max_cos)
+		/* >= because the number of tx-only connections must always be
+		 * smaller than max_cos since the primary connection supports
+		 * COS 0
+		 */
+		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d\n",
+			   o->next_tx_only, o->max_cos);
+
+	DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
+			 "setting state to %d\n", cmd,
+			 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+
+	if (o->next_tx_only)  /* print num tx-only if any exist */
+		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
+			   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+
+	o->state = o->next_state;
+	o->num_tx_only = o->next_tx_only;
+	o->next_state = BNX2X_Q_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
+
+	/* Rx data */
+
+	/* IPv6 TPA supported for E2 and above only */
+	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
+}
+
+static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_general_setup_params *params,
+				struct client_init_general_data *gen_data,
+				unsigned long *flags)
+{
+	gen_data->client_id = o->cl_id;
+
+	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
+		gen_data->statistics_counter_id =
+					params->stat_id;
+		gen_data->statistics_en_flg = 1;
+		gen_data->statistics_zero_flg =
+			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
+	} else
+		gen_data->statistics_counter_id =
+					DISABLE_STATISTIC_COUNTER_ID_VALUE;
+
+	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
+	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
+	gen_data->sp_client_id = params->spcl_id;
+	gen_data->mtu = cpu_to_le16(params->mtu);
+	gen_data->func_id = o->func_id;
+
+	gen_data->cos = params->cos;
+
+	gen_data->traffic_type =
+		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
+		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
+
+	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
+	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
+}
+
+static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_txq_setup_params *params,
+				struct client_init_tx_data *tx_data,
+				unsigned long *flags)
+{
+	tx_data->enforce_security_flg =
+		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
+	tx_data->default_vlan =
+		cpu_to_le16(params->default_vlan);
+	tx_data->default_vlan_flg =
+		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
+	tx_data->tx_switching_flg =
+		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
+	tx_data->anti_spoofing_flg =
+		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
+	tx_data->tx_status_block_id = params->fw_sb_id;
+	tx_data->tx_sb_index_number = params->sb_cq_index;
+	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
+
+	tx_data->tx_bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->dscr_map));
+	tx_data->tx_bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->dscr_map));
+
+	/* Don't configure any Tx switching mode during queue SETUP */
+	tx_data->state = 0;
+}
+
+static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
+				struct rxq_pause_params *params,
+				struct client_init_rx_data *rx_data)
+{
+	/* flow control data */
+	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
+	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
+	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
+	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
+	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
+	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
+	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
+}
+
+static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_rxq_setup_params *params,
+				struct client_init_rx_data *rx_data,
+				unsigned long *flags)
+{
+	/* Rx data */
+	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
+				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+	rx_data->vmqueue_mode_en_flg = 0;
+
+	rx_data->cache_line_alignment_log_size =
+		params->cache_line_log;
+	rx_data->enable_dynamic_hc =
+		test_bit(BNX2X_Q_FLG_DHC, flags);
+	rx_data->max_sges_for_packet = params->max_sges_pkt;
+	rx_data->client_qzone_id = params->cl_qzone_id;
+	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
+
+	/* Always start in DROP_ALL mode */
+	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
+				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
+
+	/* We don't set drop flags */
+	rx_data->drop_ip_cs_err_flg = 0;
+	rx_data->drop_tcp_cs_err_flg = 0;
+	rx_data->drop_ttl0_flg = 0;
+	rx_data->drop_udp_cs_err_flg = 0;
+	rx_data->inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_VLAN, flags);
+	rx_data->outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_FLG_OV, flags);
+	rx_data->status_block_id = params->fw_sb_id;
+	rx_data->rx_sb_index_number = params->sb_cq_index;
+	rx_data->max_tpa_queues = params->max_tpa_queues;
+	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
+	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
+	rx_data->bd_page_base.lo =
+		cpu_to_le32(U64_LO(params->dscr_map));
+	rx_data->bd_page_base.hi =
+		cpu_to_le32(U64_HI(params->dscr_map));
+	rx_data->sge_page_base.lo =
+		cpu_to_le32(U64_LO(params->sge_map));
+	rx_data->sge_page_base.hi =
+		cpu_to_le32(U64_HI(params->sge_map));
+	rx_data->cqe_page_base.lo =
+		cpu_to_le32(U64_LO(params->rcq_map));
+	rx_data->cqe_page_base.hi =
+		cpu_to_le32(U64_HI(params->rcq_map));
+	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
+
+	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
+		rx_data->approx_mcast_engine_id = o->func_id;
+		rx_data->is_approx_mcast = 1;
+	}
+
+	rx_data->rss_engine_id = params->rss_engine_id;
+
+	/* silent vlan removal */
+	rx_data->silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
+	rx_data->silent_vlan_value =
+		cpu_to_le16(params->silent_removal_value);
+	rx_data->silent_vlan_mask =
+		cpu_to_le16(params->silent_removal_mask);
+
+
+/* initialize the general, tx and rx parts of a queue object */
+static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct client_init_ramrod_data *data)
+{
+	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+				       &cmd_params->params.setup.gen_params,
+				       &data->general,
+				       &cmd_params->params.setup.flags);
+
+	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+				  &cmd_params->params.setup.txq_params,
+				  &data->tx,
+				  &cmd_params->params.setup.flags);
+
+	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
+				  &cmd_params->params.setup.rxq_params,
+				  &data->rx,
+				  &cmd_params->params.setup.flags);
+
+	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
+				     &cmd_params->params.setup.pause_params,
+				     &data->rx);
+}
+
+/* initialize the general and tx parts of a tx-only queue object */
+static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
+				struct bnx2x_queue_state_params *cmd_params,
+				struct tx_queue_init_ramrod_data *data)
+{
+	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
+				       &cmd_params->params.tx_only.gen_params,
+				       &data->general,
+				       &cmd_params->params.tx_only.flags);
+
+	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
+				  &cmd_params->params.tx_only.txq_params,
+				  &data->tx,
+				  &cmd_params->params.tx_only.flags);
+
+	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",
+	   cmd_params->q_obj->cids[0],
+	   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
+}
+
+/**
+ * bnx2x_q_init - init HW/FW queue
+ *
+ * @bp:		device handle
+ * @params:	queue state parameters
+ *
+ * HW/FW initial Queue configuration:
+ *      - HC: Rx and Tx
+ *      - CDU context validation
+ *
+ */
+static inline int bnx2x_q_init(struct bnx2x *bp,
+			       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct bnx2x_queue_init_params *init = &params->params.init;
+	u16 hc_usec;
+	u8 cos;
+
+	/* Tx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
+		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
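+		/* e.g. (illustration only) hc_rate = 50000 interrupts/s
+		 * yields a 1000000 / 50000 = 20us coalescing period.
+		 */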
+
+		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
+			init->tx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
+			hc_usec);
+	}
+
+	/* Rx HC configuration */
+	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
+	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
+		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
+
+		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
+			init->rx.sb_cq_index,
+			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
+			hc_usec);
+	}
+
+	/* Set CDU context validation values */
+	for (cos = 0; cos < o->max_cos; cos++) {
+		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
+				 o->cids[cos], cos);
+		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
+		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
+	}
+
+	/* As no ramrod is sent, complete the command immediately  */
+	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
+
+	mmiowb();
+	smp_mb();
+
+	return 0;
+}
+
+static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+
+	/* No need for an explicit memory barrier here: the ordering
+	 * of the write to the SPQ element vs. the update of the SPQ
+	 * producer (which involves a memory read) is enforced by the
+	 * full memory barrier inside bnx2x_sp_post().
+	 */
+
+	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_init_ramrod_data *rdata =
+		(struct client_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
+	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
+
+	/* No need for an explicit memory barrier here: the ordering
+	 * of the write to the SPQ element vs. the update of the SPQ
+	 * producer (which involves a memory read) is enforced by the
+	 * full memory barrier inside bnx2x_sp_post().
+	 */
+
+	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
+				  struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct tx_queue_init_ramrod_data *rdata =
+		(struct tx_queue_init_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
+	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
+		&params->params.tx_only;
+	u8 cid_index = tx_only_params->cid_index;
+
+	if (cid_index >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_index);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
+			 tx_only_params->gen_params.cos,
+			 tx_only_params->gen_params.spcl_id);
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
+
+	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
+			 "sp-client id %d, cos %d\n",
+			 o->cids[cid_index],
+			 rdata->general.client_id,
+			 rdata->general.sp_client_id, rdata->general.cos);
+
+	/* No need for an explicit memory barrier here: the ordering
+	 * of the write to the SPQ element vs. the update of the SPQ
+	 * producer (which involves a memory read) is enforced by the
+	 * full memory barrier inside bnx2x_sp_post().
+	 */
+
+	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+static void bnx2x_q_fill_update_data(struct bnx2x *bp,
+				     struct bnx2x_queue_sp_obj *obj,
+				     struct bnx2x_queue_update_params *params,
+				     struct client_update_ramrod_data *data)
+{
+	/* Client ID of the client to update */
+	data->client_id = obj->cl_id;
+
+	/* Function ID of the client to update */
+	data->func_id = obj->func_id;
+
+	/* Default VLAN value */
+	data->default_vlan = cpu_to_le16(params->def_vlan);
+
+	/* Inner VLAN stripping */
+	data->inner_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
+	data->inner_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Outer VLAN stripping */
+	data->outer_vlan_removal_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
+	data->outer_vlan_removal_change_flg =
+		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+			 &params->update_flags);
+
+	/* Drop packets that have source MAC that doesn't belong to this
+	 * Queue.
+	 */
+	data->anti_spoofing_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
+	data->anti_spoofing_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
+
+	/* Activate/Deactivate */
+	data->activate_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
+	data->activate_change_flg =
+		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
+
+	/* Enable default VLAN */
+	data->default_vlan_enable_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
+	data->default_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+			 &params->update_flags);
+
+	/* silent vlan removal */
+	data->silent_vlan_change_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+			 &params->update_flags);
+	data->silent_vlan_removal_flg =
+		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
+	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
+	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
+}
+
+static inline int bnx2x_q_send_update(struct bnx2x *bp,
+				      struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	struct client_update_ramrod_data *rdata =
+		(struct client_update_ramrod_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_queue_update_params *update_params =
+		&params->params.update;
+	u8 cid_index = update_params->cid_index;
+
+	if (cid_index >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_index);
+		return -EINVAL;
+	}
+
+	/* Clear the ramrod data */
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data */
+	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
+
+	/*
+	 * No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element and updating
+	 * of the SPQ producer, which involves a memory read; the full
+	 * memory barrier inside bnx2x_sp_post() provides that ordering.
+	 */
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
+			     o->cids[cid_index], U64_HI(data_mapping),
+			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
+}
+
+/**
+ * bnx2x_q_send_deactivate - send DEACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state change parameters
+ *
+ * Implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+/**
+ * bnx2x_q_send_activate - send ACTIVATE command
+ *
+ * @bp:		device handle
+ * @params:	queue state change parameters
+ *
+ * Implemented using the UPDATE command.
+ */
+static inline int bnx2x_q_send_activate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_update_params *update = &params->params.update;
+
+	memset(update, 0, sizeof(*update));
+
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
+	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
+
+	return bnx2x_q_send_update(bp, params);
+}
+
+static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	/* TODO: Not implemented yet. */
+	return -1;
+}
+
+static inline int bnx2x_q_send_halt(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
+			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
+				       struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	u8 cid_idx = params->params.cfc_del.cid_index;
+
+	if (cid_idx >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_idx);
+		return -EINVAL;
+	}
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
+			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+	u8 cid_index = params->params.terminate.cid_index;
+
+	if (cid_index >= o->max_cos) {
+		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
+			  o->cl_id, cid_index);
+		return -EINVAL;
+	}
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
+			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_q_send_empty(struct bnx2x *bp,
+				     struct bnx2x_queue_state_params *params)
+{
+	struct bnx2x_queue_sp_obj *o = params->q_obj;
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
+			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
+			     ETH_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
+					struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_INIT:
+		return bnx2x_q_init(bp, params);
+	case BNX2X_Q_CMD_SETUP_TX_ONLY:
+		return bnx2x_q_send_setup_tx_only(bp, params);
+	case BNX2X_Q_CMD_DEACTIVATE:
+		return bnx2x_q_send_deactivate(bp, params);
+	case BNX2X_Q_CMD_ACTIVATE:
+		return bnx2x_q_send_activate(bp, params);
+	case BNX2X_Q_CMD_UPDATE:
+		return bnx2x_q_send_update(bp, params);
+	case BNX2X_Q_CMD_UPDATE_TPA:
+		return bnx2x_q_send_update_tpa(bp, params);
+	case BNX2X_Q_CMD_HALT:
+		return bnx2x_q_send_halt(bp, params);
+	case BNX2X_Q_CMD_CFC_DEL:
+		return bnx2x_q_send_cfc_del(bp, params);
+	case BNX2X_Q_CMD_TERMINATE:
+		return bnx2x_q_send_terminate(bp, params);
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_q_send_empty(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
+				    struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e1x(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_SETUP_TX_ONLY:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
+				   struct bnx2x_queue_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_Q_CMD_SETUP:
+		return bnx2x_q_send_setup_e2(bp, params);
+	case BNX2X_Q_CMD_INIT:
+	case BNX2X_Q_CMD_SETUP_TX_ONLY:
+	case BNX2X_Q_CMD_DEACTIVATE:
+	case BNX2X_Q_CMD_ACTIVATE:
+	case BNX2X_Q_CMD_UPDATE:
+	case BNX2X_Q_CMD_UPDATE_TPA:
+	case BNX2X_Q_CMD_HALT:
+	case BNX2X_Q_CMD_CFC_DEL:
+	case BNX2X_Q_CMD_TERMINATE:
+	case BNX2X_Q_CMD_EMPTY:
+		return bnx2x_queue_send_cmd_cmn(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+/**
+ * bnx2x_queue_chk_transition - check state machine of a regular (not Forwarding) Queue
+ *
+ * @bp:		device handle
+ * @o:		queue state object
+ * @params:	queue state change parameters
+ *
+ * Checks whether the requested command is legal in the current state
+ * and, if it is, sets a `next_state' in the object that will be used
+ * in the completion flow to set the `state' of the object.
+ *
+ * Returns 0 if the requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_queue_chk_transition(struct bnx2x *bp,
+				      struct bnx2x_queue_sp_obj *o,
+				      struct bnx2x_queue_state_params *params)
+{
+	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
+	enum bnx2x_queue_cmd cmd = params->cmd;
+	struct bnx2x_queue_update_params *update_params =
+		 &params->params.update;
+	u8 next_tx_only = o->num_tx_only;
+
+	/*
+	 * Forget all commands pending for completion if a driver-only
+	 * state transition has been requested.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		o->pending = 0;
+		o->next_state = BNX2X_Q_STATE_MAX;
+	}
+
+	/*
+	 * Don't allow a next state transition if we are in the middle of
+	 * the previous one.
+	 */
+	if (o->pending)
+		return -EBUSY;
+
+	switch (state) {
+	case BNX2X_Q_STATE_RESET:
+		if (cmd == BNX2X_Q_CMD_INIT)
+			next_state = BNX2X_Q_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_Q_STATE_INITIALIZED:
+		if (cmd == BNX2X_Q_CMD_SETUP) {
+			if (test_bit(BNX2X_Q_FLG_ACTIVE,
+				     &params->params.setup.flags))
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_ACTIVE:
+		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+			next_state = BNX2X_Q_STATE_MULTI_COS;
+			next_tx_only = 1;
+		}
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			/* If "active" state change is requested, update the
+			 *  state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				      &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_INACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_ACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_MULTI_COS:
+		if (cmd == BNX2X_Q_CMD_TERMINATE)
+			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
+
+		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
+			next_state = BNX2X_Q_STATE_MULTI_COS;
+			next_tx_only = o->num_tx_only + 1;
+		}
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_MULTI_COS;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			/* If "active" state change is requested, update the
+			 *  state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				      &update_params->update_flags))
+				next_state = BNX2X_Q_STATE_INACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_MULTI_COS;
+		}
+
+		break;
+	case BNX2X_Q_STATE_MCOS_TERMINATED:
+		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
+			next_tx_only = o->num_tx_only - 1;
+			if (next_tx_only == 0)
+				next_state = BNX2X_Q_STATE_ACTIVE;
+			else
+				next_state = BNX2X_Q_STATE_MULTI_COS;
+		}
+
+		break;
+	case BNX2X_Q_STATE_INACTIVE:
+		if (cmd == BNX2X_Q_CMD_ACTIVATE)
+			next_state = BNX2X_Q_STATE_ACTIVE;
+
+		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
+			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
+			next_state = BNX2X_Q_STATE_INACTIVE;
+
+		else if (cmd == BNX2X_Q_CMD_HALT)
+			next_state = BNX2X_Q_STATE_STOPPED;
+
+		else if (cmd == BNX2X_Q_CMD_UPDATE) {
+			/* If "active" state change is requested, update the
+			 * state accordingly.
+			 */
+			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+				     &update_params->update_flags) &&
+			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
+				     &update_params->update_flags)){
+				if (o->num_tx_only == 0)
+					next_state = BNX2X_Q_STATE_ACTIVE;
+				else /* tx only queues exist for this queue */
+					next_state = BNX2X_Q_STATE_MULTI_COS;
+			} else
+				next_state = BNX2X_Q_STATE_INACTIVE;
+		}
+
+		break;
+	case BNX2X_Q_STATE_STOPPED:
+		if (cmd == BNX2X_Q_CMD_TERMINATE)
+			next_state = BNX2X_Q_STATE_TERMINATED;
+
+		break;
+	case BNX2X_Q_STATE_TERMINATED:
+		if (cmd == BNX2X_Q_CMD_CFC_DEL)
+			next_state = BNX2X_Q_STATE_RESET;
+
+		break;
+	default:
+		BNX2X_ERR("Illegal state: %d\n", state);
+	}
+
+	/* Transition is assured */
+	if (next_state != BNX2X_Q_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		o->next_tx_only = next_tx_only;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
+
+	return -EINVAL;
+}
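+
+/*
+ * For reference, a condensed sketch of the regular Queue state machine
+ * enforced above (derived from the switch statement; the UPDATE, EMPTY
+ * and UPDATE_TPA self-transitions are omitted):
+ *
+ *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
+ *   ACTIVE <--ACTIVATE/DEACTIVATE--> INACTIVE
+ *   ACTIVE --SETUP_TX_ONLY--> MULTI_COS --TERMINATE--> MCOS_TERMINATED
+ *   MCOS_TERMINATED --CFC_DEL--> ACTIVE or MULTI_COS (per tx-only count)
+ *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
+ *   TERMINATED --CFC_DEL--> RESET
+ */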
+
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj,
+			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
+			  void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
+	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
+
+	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
+	obj->max_cos = cid_cnt;
+	obj->cl_id = cl_id;
+	obj->func_id = func_id;
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+	obj->type = type;
+	obj->next_state = BNX2X_Q_STATE_MAX;
+
+	if (CHIP_IS_E1x(bp))
+		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
+	else
+		obj->send_cmd = bnx2x_queue_send_cmd_e2;
+
+	obj->check_transition = bnx2x_queue_chk_transition;
+
+	obj->complete_cmd = bnx2x_queue_comp_cmd;
+	obj->wait_comp = bnx2x_queue_wait_comp;
+	obj->set_pending = bnx2x_queue_set_pending;
+}
+
+void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
+			     struct bnx2x_queue_sp_obj *obj,
+			     u32 cid, u8 index)
+{
+	obj->cids[index] = cid;
+}
+
+/********************** Function state object *********************************/
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+					   struct bnx2x_func_sp_obj *o)
+{
+	/* in the middle of a transaction - return INVALID state */
+	if (o->pending)
+		return BNX2X_F_STATE_MAX;
+
+	/*
+	 * Ensure the ordering of reading of o->pending and o->state:
+	 * o->pending should be read first.
+	 */
+	rmb();
+
+	return o->state;
+}
+
+static int bnx2x_func_wait_comp(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				enum bnx2x_func_cmd cmd)
+{
+	return bnx2x_state_wait(bp, cmd, &o->pending);
+}
+
+/**
+ * bnx2x_func_state_change_comp - complete the state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command that has completed
+ *
+ * Called on state change transition. Completes the state
+ * machine transition only - no HW interaction.
+ */
+static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
+					       struct bnx2x_func_sp_obj *o,
+					       enum bnx2x_func_cmd cmd)
+{
+	unsigned long cur_pending = o->pending;
+
+	if (!test_and_clear_bit(cmd, &cur_pending)) {
+		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
+			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
+			  o->state, cur_pending, o->next_state);
+		return -EINVAL;
+	}
+
+	DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
+			 "%d\n", cmd, BP_FUNC(bp), o->next_state);
+
+	o->state = o->next_state;
+	o->next_state = BNX2X_F_STATE_MAX;
+
+	/* It's important that o->state and o->next_state are
+	 * updated before o->pending.
+	 */
+	wmb();
+
+	clear_bit(cmd, &o->pending);
+	smp_mb__after_clear_bit();
+
+	return 0;
+}
+
+/**
+ * bnx2x_func_comp_cmd - complete the state change command
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @cmd:	command to complete
+ *
+ * Checks that the arrived completion is expected.
+ */
+static int bnx2x_func_comp_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_sp_obj *o,
+			       enum bnx2x_func_cmd cmd)
+{
+	/* Complete the state machine part first, check if it's a
+	 * legal completion.
+	 */
+	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
+	return rc;
+}
+
+/**
+ * bnx2x_func_chk_transition - check the function state machine transition
+ *
+ * @bp:		device handle
+ * @o:		function state object
+ * @params:	function state change parameters
+ *
+ * Checks whether the requested command is legal in the current state
+ * and, if it is, sets a `next_state' in the object that will be used
+ * in the completion flow to set the `state' of the object.
+ *
+ * Returns 0 if the requested command is a legal transition,
+ *         -EINVAL otherwise.
+ */
+static int bnx2x_func_chk_transition(struct bnx2x *bp,
+				     struct bnx2x_func_sp_obj *o,
+				     struct bnx2x_func_state_params *params)
+{
+	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
+	enum bnx2x_func_cmd cmd = params->cmd;
+
+	/*
+	 * Forget all commands pending for completion if a driver-only
+	 * state transition has been requested.
+	 */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		o->pending = 0;
+		o->next_state = BNX2X_F_STATE_MAX;
+	}
+
+	/*
+	 * Don't allow a next state transition if we are in the middle of
+	 * the previous one.
+	 */
+	if (o->pending)
+		return -EBUSY;
+
+	switch (state) {
+	case BNX2X_F_STATE_RESET:
+		if (cmd == BNX2X_F_CMD_HW_INIT)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+
+		break;
+	case BNX2X_F_STATE_INITIALIZED:
+		if (cmd == BNX2X_F_CMD_START)
+			next_state = BNX2X_F_STATE_STARTED;
+
+		else if (cmd == BNX2X_F_CMD_HW_RESET)
+			next_state = BNX2X_F_STATE_RESET;
+
+		break;
+	case BNX2X_F_STATE_STARTED:
+		if (cmd == BNX2X_F_CMD_STOP)
+			next_state = BNX2X_F_STATE_INITIALIZED;
+		else if (cmd == BNX2X_F_CMD_TX_STOP)
+			next_state = BNX2X_F_STATE_TX_STOPPED;
+
+		break;
+	case BNX2X_F_STATE_TX_STOPPED:
+		if (cmd == BNX2X_F_CMD_TX_START)
+			next_state = BNX2X_F_STATE_STARTED;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown state: %d\n", state);
+	}
+
+	/* Transition is assured */
+	if (next_state != BNX2X_F_STATE_MAX) {
+		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
+				 state, cmd, next_state);
+		o->next_state = next_state;
+		return 0;
+	}
+
+	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
+			 state, cmd);
+
+	return -EINVAL;
+}
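+
+/*
+ * For reference, the function state machine checked above is a short
+ * chain (derived from the switch statement):
+ *
+ *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
+ *   STARTED <--TX_STOP/TX_START--> TX_STOPPED
+ *   STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
+ */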
+
+/**
+ * bnx2x_func_init_func - performs HW init at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver HW-init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
+ * HW blocks.
+ */
+static inline int bnx2x_func_init_func(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	return drv->init_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_init_port - performs HW init at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver HW-init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
+ * FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_port(struct bnx2x *bp,
+				       const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_port(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver HW-init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn_chip(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+/**
+ * bnx2x_func_init_cmn - performs HW init at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver HW-init callbacks
+ *
+ * Init HW when the current phase is
+ * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
+ * PORT-only and FUNCTION-only HW blocks.
+ */
+static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
+				      const struct bnx2x_func_sp_drv_ops *drv)
+{
+	int rc = drv->init_hw_cmn(bp);
+	if (rc)
+		return rc;
+
+	return bnx2x_func_init_port(bp, drv);
+}
+
+static int bnx2x_func_hw_init(struct bnx2x *bp,
+			      struct bnx2x_func_state_params *params)
+{
+	u32 load_code = params->params.hw_init.load_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+	int rc = 0;
+
+	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
+			 BP_ABS_FUNC(bp), load_code);
+
+	/* Prepare buffers for unzipping the FW */
+	rc = drv->gunzip_init(bp);
+	if (rc)
+		return rc;
+
+	/* Prepare FW */
+	rc = drv->init_fw(bp);
+	if (rc) {
+		BNX2X_ERR("Error loading firmware\n");
+		goto fw_init_err;
+	}
+
+	/* Handle the beginning of COMMON_XXX phases separately... */
+	switch (load_code) {
+	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
+		rc = bnx2x_func_init_cmn_chip(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_COMMON:
+		rc = bnx2x_func_init_cmn(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_PORT:
+		rc = bnx2x_func_init_port(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+		rc = bnx2x_func_init_func(bp, drv);
+		if (rc)
+			goto init_hw_err;
+
+		break;
+	default:
+		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
+		rc = -EINVAL;
+	}
+
+init_hw_err:
+	drv->release_fw(bp);
+
+fw_init_err:
+	drv->gunzip_end(bp);
+
+	/* In case of success, complete the command immediately: no ramrods
+	 * have been sent.
+	 */
+	if (!rc)
+		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
+
+	return rc;
+}
+
+/**
+ * bnx2x_func_reset_func - reset HW at function stage
+ *
+ * @bp:		device handle
+ * @drv:	driver HW-reset callbacks
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
+ * FUNCTION-only HW blocks.
+ */
+static inline void bnx2x_func_reset_func(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_func(bp);
+}
+
+/**
+ * bnx2x_func_reset_port - reset HW at port stage
+ *
+ * @bp:		device handle
+ * @drv:	driver HW-reset callbacks
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
+ * FUNCTION-only and PORT-only HW blocks.
+ *
+ *                 !!!IMPORTANT!!!
+ *
+ * It's important to call reset_port before reset_func() because the
+ * last thing reset_func() does is pf_disable(), which disables PGLUE_B
+ * and thereby makes any further DMAE transactions impossible.
+ */
+static inline void bnx2x_func_reset_port(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	drv->reset_hw_port(bp);
+	bnx2x_func_reset_func(bp, drv);
+}
+
+/**
+ * bnx2x_func_reset_cmn - reset HW at common stage
+ *
+ * @bp:		device handle
+ * @drv:	driver HW-reset callbacks
+ *
+ * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
+ * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
+ * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
+ */
+static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
+					const struct bnx2x_func_sp_drv_ops *drv)
+{
+	bnx2x_func_reset_port(bp, drv);
+	drv->reset_hw_cmn(bp);
+}
+
+static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
+				      struct bnx2x_func_state_params *params)
+{
+	u32 reset_phase = params->params.hw_reset.reset_phase;
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
+
+	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
+			 reset_phase);
+
+	switch (reset_phase) {
+	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
+		bnx2x_func_reset_cmn(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_PORT:
+		bnx2x_func_reset_port(bp, drv);
+		break;
+	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
+		bnx2x_func_reset_func(bp, drv);
+		break;
+	default:
+		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
+			   reset_phase);
+		break;
+	}
+
+	/* Complete the command immediately: no ramrods have been sent. */
+	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
+
+	return 0;
+}
+
+static inline int bnx2x_func_send_start(struct bnx2x *bp,
+					struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct function_start_data *rdata =
+		(struct function_start_data *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_start_params *start_params = &params->params.start;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	/* Fill the ramrod data with provided parameters */
+	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
+	rdata->sd_vlan_tag   = start_params->sd_vlan_tag;
+	rdata->path_id       = BP_PATH(bp);
+	rdata->network_cos_mode = start_params->network_cos_mode;
+
+	/*
+	 * No need for an explicit memory barrier here as long as we
+	 * ensure the ordering of writing to the SPQ element and updating
+	 * of the SPQ producer, which involves a memory read; the full
+	 * memory barrier inside bnx2x_sp_post() provides that ordering.
+	 */
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_stop(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
+			     NONE_CONNECTION_TYPE);
+}
+
+static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
+				       struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	struct flow_control_configuration *rdata =
+		(struct flow_control_configuration *)o->rdata;
+	dma_addr_t data_mapping = o->rdata_mapping;
+	struct bnx2x_func_tx_start_params *tx_start_params =
+		&params->params.tx_start;
+	int i;
+
+	memset(rdata, 0, sizeof(*rdata));
+
+	rdata->dcb_enabled = tx_start_params->dcb_enabled;
+	rdata->dcb_version = tx_start_params->dcb_version;
+	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;
+
+	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
+		rdata->traffic_type_to_priority_cos[i] =
+			tx_start_params->traffic_type_to_priority_cos[i];
+
+	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
+			     U64_HI(data_mapping),
+			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
+}
+
+static int bnx2x_func_send_cmd(struct bnx2x *bp,
+			       struct bnx2x_func_state_params *params)
+{
+	switch (params->cmd) {
+	case BNX2X_F_CMD_HW_INIT:
+		return bnx2x_func_hw_init(bp, params);
+	case BNX2X_F_CMD_START:
+		return bnx2x_func_send_start(bp, params);
+	case BNX2X_F_CMD_STOP:
+		return bnx2x_func_send_stop(bp, params);
+	case BNX2X_F_CMD_HW_RESET:
+		return bnx2x_func_hw_reset(bp, params);
+	case BNX2X_F_CMD_TX_STOP:
+		return bnx2x_func_send_tx_stop(bp, params);
+	case BNX2X_F_CMD_TX_START:
+		return bnx2x_func_send_tx_start(bp, params);
+	default:
+		BNX2X_ERR("Unknown command: %d\n", params->cmd);
+		return -EINVAL;
+	}
+}
+
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface)
+{
+	memset(obj, 0, sizeof(*obj));
+
+	mutex_init(&obj->one_pending_mutex);
+
+	obj->rdata = rdata;
+	obj->rdata_mapping = rdata_mapping;
+
+	obj->send_cmd = bnx2x_func_send_cmd;
+	obj->check_transition = bnx2x_func_chk_transition;
+	obj->complete_cmd = bnx2x_func_comp_cmd;
+	obj->wait_comp = bnx2x_func_wait_comp;
+
+	obj->drv = drv_iface;
+}
+
+/**
+ * bnx2x_func_state_change - perform Function state change transition
+ *
+ * @bp:		device handle
+ * @params:	parameters to perform the transaction
+ *
+ * returns 0 in case of successfully completed transition,
+ *         negative error code in case of failure, positive
+ *         (EBUSY) value if there is a completion that is
+ *         still pending (possible only if RAMROD_COMP_WAIT is
+ *         not set in params->ramrod_flags for asynchronous
+ *         commands).
+ */
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params)
+{
+	struct bnx2x_func_sp_obj *o = params->f_obj;
+	int rc;
+	enum bnx2x_func_cmd cmd = params->cmd;
+	unsigned long *pending = &o->pending;
+
+	mutex_lock(&o->one_pending_mutex);
+
+	/* Check that the requested transition is legal */
+	if (o->check_transition(bp, o, params)) {
+		mutex_unlock(&o->one_pending_mutex);
+		return -EINVAL;
+	}
+
+	/* Set "pending" bit */
+	set_bit(cmd, pending);
+
+	/* Don't send a command if only driver cleanup was requested */
+	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
+		bnx2x_func_state_change_comp(bp, o, cmd);
+		mutex_unlock(&o->one_pending_mutex);
+	} else {
+		/* Send a ramrod */
+		rc = o->send_cmd(bp, params);
+
+		mutex_unlock(&o->one_pending_mutex);
+
+		if (rc) {
+			o->next_state = BNX2X_F_STATE_MAX;
+			clear_bit(cmd, pending);
+			smp_mb__after_clear_bit();
+			return rc;
+		}
+
+		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
+			rc = o->wait_comp(bp, o, cmd);
+			if (rc)
+				return rc;
+
+			return 0;
+		}
+	}
+
+	return !!test_bit(cmd, pending);
+}
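+
+/*
+ * Illustrative usage sketch, not taken verbatim from any caller: a
+ * synchronous FUNCTION_START request could look roughly like this
+ * (func_obj is the caller's function object; the mf_mode value is the
+ * caller's choice):
+ *
+ *	struct bnx2x_func_state_params func_params = {NULL};
+ *
+ *	func_params.f_obj = func_obj;
+ *	func_params.cmd = BNX2X_F_CMD_START;
+ *	func_params.params.start.mf_mode = mf_mode;
+ *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+ *
+ *	rc = bnx2x_func_state_change(bp, &func_params);
+ */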
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
new file mode 100644
index 0000000..9a517c2
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -0,0 +1,1297 @@
+/* bnx2x_sp.h: Broadcom Everest network driver.
+ *
+ * Copyright 2011 Broadcom Corporation
+ *
+ * Unless you and Broadcom execute a separate written software license
+ * agreement governing use of this software, this software is licensed to you
+ * under the terms of the GNU General Public License version 2, available
+ * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
+ *
+ * Notwithstanding the above, under no circumstances may you combine this
+ * software in any way with any other Broadcom software provided under a
+ * license other than the GPL, without Broadcom's express prior written
+ * consent.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Vladislav Zolotarov
+ *
+ */
+#ifndef BNX2X_SP_VERBS
+#define BNX2X_SP_VERBS
+
+struct bnx2x;
+struct eth_context;
+
+/* Bits representing general command's configuration */
+enum {
+	RAMROD_TX,
+	RAMROD_RX,
+	/* Wait until all pending commands complete */
+	RAMROD_COMP_WAIT,
+	/* Don't send a ramrod, only update a registry */
+	RAMROD_DRV_CLR_ONLY,
+	/* Configure HW according to the current object state */
+	RAMROD_RESTORE,
+	/* Execute the next command now */
+	RAMROD_EXEC,
+	/*
+	 * Don't add a new command and continue execution of postponed
+	 * commands. If not set, a new command will be added to the
+	 * pending commands list.
+	 */
+	RAMROD_CONT,
+};
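+
+/*
+ * These are bit indices into an `unsigned long ramrod_flags' word;
+ * callers manipulate them with the standard bitops. An illustrative
+ * sketch (the flags variable is hypothetical):
+ *
+ *	unsigned long ramrod_flags = 0;
+ *
+ *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ *	if (test_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags))
+ *		return;
+ */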
+
+typedef enum {
+	BNX2X_OBJ_TYPE_RX,
+	BNX2X_OBJ_TYPE_TX,
+	BNX2X_OBJ_TYPE_RX_TX,
+} bnx2x_obj_type;
+
+/* Filtering states */
+enum {
+	BNX2X_FILTER_MAC_PENDING,
+	BNX2X_FILTER_VLAN_PENDING,
+	BNX2X_FILTER_VLAN_MAC_PENDING,
+	BNX2X_FILTER_RX_MODE_PENDING,
+	BNX2X_FILTER_RX_MODE_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_START_SCHED,
+	BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
+	BNX2X_FILTER_FCOE_ETH_START_SCHED,
+	BNX2X_FILTER_FCOE_ETH_STOP_SCHED,
+	BNX2X_FILTER_MCAST_PENDING,
+	BNX2X_FILTER_MCAST_SCHED,
+	BNX2X_FILTER_RSS_CONF_PENDING,
+};
+
+struct bnx2x_raw_obj {
+	u8		func_id;
+
+	/* Queue params */
+	u8		cl_id;
+	u32		cid;
+
+	/* Ramrod data buffer params */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/* Ramrod state params */
+	int		state;   /* "ramrod is pending" state bit */
+	unsigned long	*pstate; /* pointer to state buffer */
+
+	bnx2x_obj_type	obj_type;
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_raw_obj *o);
+
+	bool (*check_pending)(struct bnx2x_raw_obj *o);
+	void (*clear_pending)(struct bnx2x_raw_obj *o);
+	void (*set_pending)(struct bnx2x_raw_obj *o);
+};
+
+/************************* VLAN-MAC commands related parameters ***************/
+struct bnx2x_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+};
+
+struct bnx2x_vlan_ramrod_data {
+	u16 vlan;
+};
+
+struct bnx2x_vlan_mac_ramrod_data {
+	u8 mac[ETH_ALEN];
+	u16 vlan;
+};
+
+union bnx2x_classification_ramrod_data {
+	struct bnx2x_mac_ramrod_data mac;
+	struct bnx2x_vlan_ramrod_data vlan;
+	struct bnx2x_vlan_mac_ramrod_data vlan_mac;
+};
+
+/* VLAN_MAC commands */
+enum bnx2x_vlan_mac_cmd {
+	BNX2X_VLAN_MAC_ADD,
+	BNX2X_VLAN_MAC_DEL,
+	BNX2X_VLAN_MAC_MOVE,
+};
+
+struct bnx2x_vlan_mac_data {
+	/* Requested command: BNX2X_VLAN_MAC_XX */
+	enum bnx2x_vlan_mac_cmd cmd;
+	/*
+	 * Used to contain the data-related vlan_mac_flags bits from the
+	 * ramrod parameters.
+	 */
+	unsigned long vlan_mac_flags;
+
+	/* Needed for MOVE command */
+	struct bnx2x_vlan_mac_obj *target_obj;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/*************************** Exe Queue obj ************************************/
+union bnx2x_exe_queue_cmd_data {
+	struct bnx2x_vlan_mac_data vlan_mac;
+
+	struct {
+		/* TODO */
+	} mcast;
+};
+
+struct bnx2x_exeq_elem {
+	struct list_head		link;
+
+	/* Length of this element in the exe_chunk. */
+	int				cmd_len;
+
+	union bnx2x_exe_queue_cmd_data	cmd_data;
+};
+
+union bnx2x_qable_obj;
+
+union bnx2x_exeq_comp_elem {
+	union event_ring_elem *elem;
+};
+
+struct bnx2x_exe_queue_obj;
+
+typedef int (*exe_q_validate)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+
+/**
+ * @return positive if the entry was optimized, 0 if not, negative
+ *         in case of an error.
+ */
+typedef int (*exe_q_optimize)(struct bnx2x *bp,
+			      union bnx2x_qable_obj *o,
+			      struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_execute)(struct bnx2x *bp,
+			     union bnx2x_qable_obj *o,
+			     struct list_head *exe_chunk,
+			     unsigned long *ramrod_flags);
+typedef struct bnx2x_exeq_elem *
+			(*exe_q_get)(struct bnx2x_exe_queue_obj *o,
+				     struct bnx2x_exeq_elem *elem);
+
+struct bnx2x_exe_queue_obj {
+	/*
+	 * Commands pending for execution.
+	 */
+	struct list_head	exe_queue;
+
+	/*
+	 * Commands pending for an completion.
+	 */
+	struct list_head	pending_comp;
+
+	spinlock_t		lock;
+
+	/* Maximum length of commands' list for one execution */
+	int			exe_chunk_len;
+
+	union bnx2x_qable_obj	*owner;
+
+	/****** Virtual functions ******/
+	/**
+	 * Called before commands execution for commands that are really
+	 * going to be executed (after 'optimize').
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_validate		validate;
+
+
+	/**
+	 * This will try to cancel the current pending commands list
+	 * considering the new command.
+	 *
+	 * Must run under exe_queue->lock
+	 */
+	exe_q_optimize		optimize;
+
+	/**
+	 * Run the next commands chunk (owner specific).
+	 */
+	exe_q_execute		execute;
+
+	/**
+	 * Return the exe_queue element containing the specific command
+	 * if any. Otherwise return NULL.
+	 */
+	exe_q_get		get;
+};
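+
+/*
+ * Expected life cycle of a command in the execution queue, as implied
+ * by the callbacks above: a new element is validated, checked against
+ * already pending commands (optimize) and queued on exe_queue; chunks
+ * of up to exe_chunk_len elements are then moved to pending_comp and
+ * executed; an element is released once its completion arrives.
+ */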
+/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
+/*
+ * Element in the VLAN_MAC registry list having all currently configured
+ * rules.
+ */
+struct bnx2x_vlan_mac_registry_elem {
+	struct list_head	link;
+
+	/*
+	 * Used to store the cam offset used for the mac/vlan/vlan-mac.
+	 * Relevant for 57710 and 57711 only. VLANs and MACs share the
+	 * same CAM for these chips.
+	 */
+	int			cam_offset;
+
+	/* Needed for DEL and RESTORE flows */
+	unsigned long		vlan_mac_flags;
+
+	union bnx2x_classification_ramrod_data u;
+};
+
+/* Bits representing VLAN_MAC commands specific flags */
+enum {
+	BNX2X_UC_LIST_MAC,
+	BNX2X_ETH_MAC,
+	BNX2X_ISCSI_ETH_MAC,
+	BNX2X_NETQ_ETH_MAC,
+	BNX2X_DONT_CONSUME_CAM_CREDIT,
+	BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
+};
+
+struct bnx2x_vlan_mac_ramrod_params {
+	/* Object to run the command from */
+	struct bnx2x_vlan_mac_obj *vlan_mac_obj;
+
+	/* General command flags: COMP_WAIT, etc. */
+	unsigned long ramrod_flags;
+
+	/* Command specific configuration request */
+	struct bnx2x_vlan_mac_data user_req;
+};
+
+struct bnx2x_vlan_mac_obj {
+	struct bnx2x_raw_obj raw;
+
+	/* Bookkeeping list: will prevent the addition of already existing
+	 * entries.
+	 */
+	struct list_head		head;
+
+	/* TODO: Add its initialization in the init functions */
+	struct bnx2x_exe_queue_obj	exe_queue;
+
+	/* MACs credit pool */
+	struct bnx2x_credit_pool_obj	*macs_pool;
+
+	/* VLANs credit pool */
+	struct bnx2x_credit_pool_obj	*vlans_pool;
+
+	/* RAMROD command to be used */
+	int				ramrod_cmd;
+
+	/**
+	 * Checks if ADD-ramrod with the given params may be performed.
+	 *
+	 * @return zero if the element may be added
+	 */
+	int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+			 union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if DEL-ramrod with the given params may be performed.
+	 *
+	 * @return the matching registry element if it may be deleted,
+	 *         NULL otherwise
+	 */
+	struct bnx2x_vlan_mac_registry_elem *
+		(*check_del)(struct bnx2x_vlan_mac_obj *o,
+			     union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 * Checks if MOVE-ramrod with the given params may be performed.
+	 *
+	 * @return true if the element may be moved
+	 */
+	bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+			   struct bnx2x_vlan_mac_obj *dst_o,
+			   union bnx2x_classification_ramrod_data *data);
+
+	/**
+	 *  Update the relevant credit object(s) (consume/return
+	 *  correspondingly).
+	 */
+	bool (*get_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*put_credit)(struct bnx2x_vlan_mac_obj *o);
+	bool (*get_cam_offset)(struct bnx2x_vlan_mac_obj *o, int *offset);
+	bool (*put_cam_offset)(struct bnx2x_vlan_mac_obj *o, int offset);
+
+	/**
+	 * Configures one rule in the ramrod data buffer.
+	 */
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *o,
+			     struct bnx2x_exeq_elem *elem, int rule_idx,
+			     int cam_offset);
+
+	/**
+	 * Delete all configured elements having the given
+	 * vlan_mac_flags specification. Assumes no commands are
+	 * pending for execution. Will schedule all currently
+	 * configured MACs/VLANs/VLAN-MACs matching the vlan_mac_flags
+	 * specification for deletion and will use the given
+	 * ramrod_flags for the last DEL operation.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param ramrod_flags RAMROD_XX flags
+	 *
+	 * @return 0 if the last operation has completed successfully
+	 *         and there are no more elements left, positive value
+	 *         if there are pending for completion commands,
+	 *         negative value in case of failure.
+	 */
+	int (*delete_all)(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_obj *o,
+			  unsigned long *vlan_mac_flags,
+			  unsigned long *ramrod_flags);
+
+	/**
+	 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the previously
+	 * configured elements list.
+	 *
+	 * @param bp
+	 * @param p Command parameters (RAMROD_COMP_WAIT bit in
+	 *          ramrod_flags is only taken into an account)
+	 * @param ppos a pointer to the cookie that should be given back in the
+	 *        next call to make function handle the next element. If
+	 *        *ppos is set to NULL it will restart the iterator.
+	 *        If returned *ppos == NULL this means that the last
+	 *        element has been handled.
+	 *
+	 * @return int
+	 */
+	int (*restore)(struct bnx2x *bp,
+		       struct bnx2x_vlan_mac_ramrod_params *p,
+		       struct bnx2x_vlan_mac_registry_elem **ppos);
+
+	/**
+	 * Should be called on a completion arrival.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param cqe Completion element we are handling
+	 * @param ramrod_flags if RAMROD_CONT is set the next bulk of
+	 *		       pending commands will be executed.
+	 *		       RAMROD_DRV_CLR_ONLY and RAMROD_RESTORE
+	 *		       may also be set if needed.
+	 *
+	 * @return 0 if there are neither pending nor waiting for
+	 *         completion commands. Positive value if there are
+	 *         pending for execution or for completion commands.
+	 *         Negative value in case of an error (including an
+	 *         error in the cqe).
+	 */
+	int (*complete)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+			union event_ring_elem *cqe,
+			unsigned long *ramrod_flags);
+
+	/**
+	 * Wait for completion of all commands. Don't schedule new ones,
+	 * just wait. It assumes that the completion code will schedule
+	 * for new commands.
+	 */
+	int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
+};
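+
+/*
+ * Illustrative usage sketch for the classification API declared below
+ * (hypothetical caller; mac_obj and mac_addr are whatever the caller
+ * owns): adding an Ethernet MAC and waiting for its completion.
+ *
+ *	struct bnx2x_vlan_mac_ramrod_params p = {NULL};
+ *
+ *	p.vlan_mac_obj = mac_obj;
+ *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ *	memcpy(p.user_req.u.mac.mac, mac_addr, ETH_ALEN);
+ *	__set_bit(BNX2X_ETH_MAC, &p.user_req.vlan_mac_flags);
+ *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
+ *
+ *	rc = bnx2x_config_vlan_mac(bp, &p);
+ */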
+
+/** RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
+
+/* RX_MODE ramrod special flags: set in rx_mode_flags field in
+ * a bnx2x_rx_mode_ramrod_params.
+ */
+enum {
+	BNX2X_RX_MODE_FCOE_ETH,
+	BNX2X_RX_MODE_ISCSI_ETH,
+};
+
+enum {
+	BNX2X_ACCEPT_UNICAST,
+	BNX2X_ACCEPT_MULTICAST,
+	BNX2X_ACCEPT_ALL_UNICAST,
+	BNX2X_ACCEPT_ALL_MULTICAST,
+	BNX2X_ACCEPT_BROADCAST,
+	BNX2X_ACCEPT_UNMATCHED,
+	BNX2X_ACCEPT_ANY_VLAN
+};
+
+struct bnx2x_rx_mode_ramrod_params {
+	struct bnx2x_rx_mode_obj *rx_mode_obj;
+	unsigned long *pstate;
+	int state;
+	u8 cl_id;
+	u32 cid;
+	u8 func_id;
+	unsigned long ramrod_flags;
+	unsigned long rx_mode_flags;
+
+	/*
+	 * rdata is either a pointer to eth_filter_rules_ramrod_data(e2) or to
+	 * a tstorm_eth_mac_filter_config (e1x).
+	 */
+	void *rdata;
+	dma_addr_t rdata_mapping;
+
+	/* Rx mode settings */
+	unsigned long rx_accept_flags;
+
+	/* internal switching settings */
+	unsigned long tx_accept_flags;
+};
+
+struct bnx2x_rx_mode_obj {
+	int (*config_rx_mode)(struct bnx2x *bp,
+			      struct bnx2x_rx_mode_ramrod_params *p);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
+};
+
+/********************** Set multicast group ***********************************/
+
+struct bnx2x_mcast_list_elem {
+	struct list_head link;
+	u8 *mac;
+};
+
+union bnx2x_mcast_config_data {
+	u8 *mac;
+	u8 bin; /* used in a RESTORE flow */
+};
+
+struct bnx2x_mcast_ramrod_params {
+	struct bnx2x_mcast_obj *mcast_obj;
+
+	/* Relevant options are RAMROD_COMP_WAIT and RAMROD_DRV_CLR_ONLY */
+	unsigned long ramrod_flags;
+
+	struct list_head mcast_list; /* list of struct bnx2x_mcast_list_elem */
+	/** TODO:
+	 *      - rename it to macs_num.
+	 *      - Add a new command type for handling pending commands
+	 *        (remove "zero semantics").
+	 *
+	 *  Length of mcast_list. If zero and ADD_CONT command - post
+	 *  pending commands.
+	 */
+	int mcast_list_len;
+};
+
+enum {
+	BNX2X_MCAST_CMD_ADD,
+	BNX2X_MCAST_CMD_CONT,
+	BNX2X_MCAST_CMD_DEL,
+	BNX2X_MCAST_CMD_RESTORE,
+};
+
+struct bnx2x_mcast_obj {
+	struct bnx2x_raw_obj raw;
+
+	union {
+		struct {
+		#define BNX2X_MCAST_BINS_NUM	256
+		#define BNX2X_MCAST_VEC_SZ	(BNX2X_MCAST_BINS_NUM / 64)
+			u64 vec[BNX2X_MCAST_VEC_SZ];
+
+			/** Number of BINs to clear. Should be updated
+			 *  immediately when a command arrives in order to
+			 *  properly create DEL commands.
+			 */
+			int num_bins_set;
+		} aprox_match;
+
+		struct {
+			struct list_head macs;
+			int num_macs_set;
+		} exact_match;
+	} registry;
+
+	/* Pending commands */
+	struct list_head pending_cmds_head;
+
+	/* A state that is set in raw.pstate, when there are pending commands */
+	int sched_state;
+
+	/* Maximal number of mcast MACs configured in one command */
+	int max_cmd_len;
+
+	/* Total number of currently pending MACs to configure: both
+	 * in the pending commands list and in the current command.
+	 */
+	int total_pending_num;
+
+	u8 engine_id;
+
+	/**
+	 * @param cmd command to execute (BNX2X_MCAST_CMD_X, see above)
+	 */
+	int (*config_mcast)(struct bnx2x *bp,
+				struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	/**
+	 * Fills the ramrod data during the RESTORE flow.
+	 *
+	 * @param bp
+	 * @param o
+	 * @param start_idx Registry index to start from
+	 * @param rdata_idx Index in the ramrod data to start from
+	 *
+	 * @return -1 if we handled the whole registry or index of the last
+	 *         handled registry element.
+	 */
+	int (*hdl_restore)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   int start_bin, int *rdata_idx);
+
+	int (*enqueue_cmd)(struct bnx2x *bp, struct bnx2x_mcast_obj *o,
+			   struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	void (*set_one_rule)(struct bnx2x *bp,
+			     struct bnx2x_mcast_obj *o, int idx,
+			     union bnx2x_mcast_config_data *cfg_data, int cmd);
+
+	/** Checks if there are more mcast MACs to be set or a previous
+	 *  command is still pending.
+	 */
+	bool (*check_pending)(struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Set/Clear/Check SCHEDULED state of the object
+	 */
+	void (*set_sched)(struct bnx2x_mcast_obj *o);
+	void (*clear_sched)(struct bnx2x_mcast_obj *o);
+	bool (*check_sched)(struct bnx2x_mcast_obj *o);
+
+	/* Wait until all pending commands complete */
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_mcast_obj *o);
+
+	/**
+	 * Handle the internal object counters needed for proper
+	 * commands handling. Checks that the provided parameters are
+	 * feasible.
+	 */
+	int (*validate)(struct bnx2x *bp,
+			struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+	/**
+	 * Restore the values of internal counters in case of a failure.
+	 */
+	void (*revert)(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p,
+		       int old_num_bins);
+
+	int (*get_registry_size)(struct bnx2x_mcast_obj *o);
+	void (*set_registry_size)(struct bnx2x_mcast_obj *o, int n);
+};
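+
+/*
+ * Illustrative usage sketch (hypothetical caller code): configuring
+ * one multicast MAC through the object's config_mcast() callback.
+ *
+ *	struct bnx2x_mcast_ramrod_params rparam = {NULL};
+ *
+ *	rparam.mcast_obj = mcast_obj;
+ *	INIT_LIST_HEAD(&rparam.mcast_list);
+ *	list_add_tail(&elem->link, &rparam.mcast_list);
+ *	rparam.mcast_list_len = 1;
+ *
+ *	rc = mcast_obj->config_mcast(bp, &rparam, BNX2X_MCAST_CMD_ADD);
+ */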
+
+/*************************** Credit handling **********************************/
+struct bnx2x_credit_pool_obj {
+
+	/* Current amount of credit in the pool */
+	atomic_t	credit;
+
+	/* Maximum allowed credit. put() will check against it. */
+	int		pool_sz;
+
+	/*
+	 *  Allocate a pool table statically.
+	 *
+	 *  Currently the maximum allowed size is MAX_MAC_CREDIT_E2(272)
+	 *
+	 *  The set bit in the table will mean that the entry is available.
+	 */
+#define BNX2X_POOL_VEC_SIZE	(MAX_MAC_CREDIT_E2 / 64)
+	u64		pool_mirror[BNX2X_POOL_VEC_SIZE];
+
+	/* Base pool offset (initialized differently) */
+	int		base_pool_offset;
+
+	/**
+	 * Get the next free pool entry.
+	 *
+	 * @return true if there was a free entry in the pool
+	 */
+	bool (*get_entry)(struct bnx2x_credit_pool_obj *o, int *entry);
+
+	/**
+	 * Return the entry back to the pool.
+	 *
+	 * @return true if entry is legal and has been successfully
+	 *         returned to the pool.
+	 */
+	bool (*put_entry)(struct bnx2x_credit_pool_obj *o, int entry);
+
+	/**
+	 * Get the requested amount of credit from the pool.
+	 *
+	 * @param cnt Amount of requested credit
+	 * @return true if the operation is successful
+	 */
+	bool (*get)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Returns the credit to the pool.
+	 *
+	 * @param cnt Amount of credit to return
+	 * @return true if the operation is successful
+	 */
+	bool (*put)(struct bnx2x_credit_pool_obj *o, int cnt);
+
+	/**
+	 * Reads the current amount of credit.
+	 */
+	int (*check)(struct bnx2x_credit_pool_obj *o);
+};
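+
+/*
+ * Illustrative credit handling sketch (hypothetical caller): consume
+ * one unit of credit before issuing a command and return it if the
+ * command could not be sent.
+ *
+ *	if (!pool->get(pool, 1))
+ *		return -ENOSPC;
+ *	rc = send_cmd();
+ *	if (rc)
+ *		pool->put(pool, 1);
+ */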
+
+/*************************** RSS configuration ********************************/
+enum {
+	/* RSS_MODE bits are mutually exclusive */
+	BNX2X_RSS_MODE_DISABLED,
+	BNX2X_RSS_MODE_REGULAR,
+	BNX2X_RSS_MODE_VLAN_PRI,
+	BNX2X_RSS_MODE_E1HOV_PRI,
+	BNX2X_RSS_MODE_IP_DSCP,
+
+	BNX2X_RSS_SET_SRCH, /* Setup searcher, E1x specific flag */
+
+	BNX2X_RSS_IPV4,
+	BNX2X_RSS_IPV4_TCP,
+	BNX2X_RSS_IPV6,
+	BNX2X_RSS_IPV6_TCP,
+};
+
+struct bnx2x_config_rss_params {
+	struct bnx2x_rss_config_obj *rss_obj;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* BNX2X_RSS_X bits */
+	unsigned long	rss_flags;
+
+	/* Number hash bits to take into an account */
+	u8		rss_result_mask;
+
+	/* Indirection table */
+	u8		ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	/* RSS hash values */
+	u32		rss_key[10];
+
+	/* valid iff BNX2X_RSS_UPDATE_TOE is set */
+	u16		toe_rss_bitmap;
+};
+
+struct bnx2x_rss_config_obj {
+	struct bnx2x_raw_obj	raw;
+
+	/* RSS engine to use */
+	u8			engine_id;
+
+	/* Last configured indirection table */
+	u8			ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+
+	int (*config_rss)(struct bnx2x *bp,
+			  struct bnx2x_config_rss_params *p);
+};
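+
+/*
+ * Illustrative RSS configuration sketch (hypothetical caller; the
+ * indirection table contents are the caller's): enable TCP hashing
+ * in the regular RSS mode and apply it through the object.
+ *
+ *	struct bnx2x_config_rss_params p = {NULL};
+ *
+ *	p.rss_obj = rss_obj;
+ *	__set_bit(BNX2X_RSS_MODE_REGULAR, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV4_TCP, &p.rss_flags);
+ *	__set_bit(BNX2X_RSS_IPV6_TCP, &p.rss_flags);
+ *	memcpy(p.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
+ *
+ *	rc = rss_obj->config_rss(bp, &p);
+ */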
+
+/*********************** Queue state update ***********************************/
+
+/* UPDATE command options */
+enum {
+	BNX2X_Q_UPDATE_IN_VLAN_REM,
+	BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM,
+	BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_ANTI_SPOOF,
+	BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG,
+	BNX2X_Q_UPDATE_ACTIVATE,
+	BNX2X_Q_UPDATE_ACTIVATE_CHNG,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN,
+	BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+	BNX2X_Q_UPDATE_SILENT_VLAN_REM
+};
+
+/* Allowed Queue states */
+enum bnx2x_q_state {
+	BNX2X_Q_STATE_RESET,
+	BNX2X_Q_STATE_INITIALIZED,
+	BNX2X_Q_STATE_ACTIVE,
+	BNX2X_Q_STATE_MULTI_COS,
+	BNX2X_Q_STATE_MCOS_TERMINATED,
+	BNX2X_Q_STATE_INACTIVE,
+	BNX2X_Q_STATE_STOPPED,
+	BNX2X_Q_STATE_TERMINATED,
+	BNX2X_Q_STATE_FLRED,
+	BNX2X_Q_STATE_MAX,
+};
+
+/* Allowed commands */
+enum bnx2x_queue_cmd {
+	BNX2X_Q_CMD_INIT,
+	BNX2X_Q_CMD_SETUP,
+	BNX2X_Q_CMD_SETUP_TX_ONLY,
+	BNX2X_Q_CMD_DEACTIVATE,
+	BNX2X_Q_CMD_ACTIVATE,
+	BNX2X_Q_CMD_UPDATE,
+	BNX2X_Q_CMD_UPDATE_TPA,
+	BNX2X_Q_CMD_HALT,
+	BNX2X_Q_CMD_CFC_DEL,
+	BNX2X_Q_CMD_TERMINATE,
+	BNX2X_Q_CMD_EMPTY,
+	BNX2X_Q_CMD_MAX,
+};
+
+/* queue SETUP + INIT flags */
+enum {
+	BNX2X_Q_FLG_TPA,
+	BNX2X_Q_FLG_TPA_IPV6,
+	BNX2X_Q_FLG_STATS,
+	BNX2X_Q_FLG_ZERO_STATS,
+	BNX2X_Q_FLG_ACTIVE,
+	BNX2X_Q_FLG_OV,
+	BNX2X_Q_FLG_VLAN,
+	BNX2X_Q_FLG_COS,
+	BNX2X_Q_FLG_HC,
+	BNX2X_Q_FLG_HC_EN,
+	BNX2X_Q_FLG_DHC,
+	BNX2X_Q_FLG_FCOE,
+	BNX2X_Q_FLG_LEADING_RSS,
+	BNX2X_Q_FLG_MCAST,
+	BNX2X_Q_FLG_DEF_VLAN,
+	BNX2X_Q_FLG_TX_SWITCH,
+	BNX2X_Q_FLG_TX_SEC,
+	BNX2X_Q_FLG_ANTI_SPOOF,
+	BNX2X_Q_FLG_SILENT_VLAN_REM
+};
+
+/* Queue type options: queue type may be a combination of the below. */
+enum bnx2x_q_type {
+	/** TODO: Consider moving both these flags into the init()
+	 *        ramrod params.
+	 */
+	BNX2X_Q_TYPE_HAS_RX,
+	BNX2X_Q_TYPE_HAS_TX,
+};
+
+#define BNX2X_PRIMARY_CID_INDEX			0
+#define BNX2X_MULTI_TX_COS_E1X			1
+#define BNX2X_MULTI_TX_COS_E2_E3A0		2
+#define BNX2X_MULTI_TX_COS_E3B0			3
+#define BNX2X_MULTI_TX_COS			BNX2X_MULTI_TX_COS_E3B0
+
+
+struct bnx2x_queue_init_params {
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} tx;
+
+	struct {
+		unsigned long	flags;
+		u16		hc_rate;
+		u8		fw_sb_id;
+		u8		sb_cq_index;
+	} rx;
+
+	/* CID context in the host memory */
+	struct eth_context *cxts[BNX2X_MULTI_TX_COS];
+
+	/* maximum number of cos supported by hardware */
+	u8 max_cos;
+};
+
+struct bnx2x_queue_terminate_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_cfc_del_params {
+	/* index within the tx_only cids of this queue object */
+	u8 cid_index;
+};
+
+struct bnx2x_queue_update_params {
+	unsigned long	update_flags; /* BNX2X_Q_UPDATE_XX bits */
+	u16		def_vlan;
+	u16		silent_removal_value;
+	u16		silent_removal_mask;
+	/* index within the tx_only cids of this queue object */
+	u8		cid_index;
+};
+
+struct rxq_pause_params {
+	u16		bd_th_lo;
+	u16		bd_th_hi;
+	u16		rcq_th_lo;
+	u16		rcq_th_hi;
+	u16		sge_th_lo; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		sge_th_hi; /* valid iff BNX2X_Q_FLG_TPA */
+	u16		pri_map;
+};
+
+/* general */
+struct bnx2x_general_setup_params {
+	/* valid iff BNX2X_Q_FLG_STATS */
+	u8		stat_id;
+
+	u8		spcl_id;
+	u16		mtu;
+	u8		cos;
+};
+
+struct bnx2x_rxq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+	dma_addr_t	sge_map;
+	dma_addr_t	rcq_map;
+	dma_addr_t	rcq_np_map;
+
+	u16		drop_flags;
+	u16		buf_sz;
+	u8		fw_sb_id;
+	u8		cl_qzone_id;
+
+	/* valid iff BNX2X_Q_FLG_TPA */
+	u16		tpa_agg_sz;
+	u16		sge_buf_sz;
+	u8		max_sges_pkt;
+	u8		max_tpa_queues;
+	u8		rss_engine_id;
+
+	u8		cache_line_log;
+
+	u8		sb_cq_index;
+
+	/* valid iff BNX2X_Q_FLG_SILENT_VLAN_REM */
+	u16 silent_removal_value;
+	u16 silent_removal_mask;
+};
+
+struct bnx2x_txq_setup_params {
+	/* dma */
+	dma_addr_t	dscr_map;
+
+	u8		fw_sb_id;
+	u8		sb_cq_index;
+	u8		cos;		/* valid iff BNX2X_Q_FLG_COS */
+	u16		traffic_type;
+	/* equal to the leading rss client id, used for TX classification */
+	u8		tss_leading_cl_id;
+
+	/* valid iff BNX2X_Q_FLG_DEF_VLAN */
+	u16		default_vlan;
+};
+
+struct bnx2x_queue_setup_params {
+	struct bnx2x_general_setup_params gen_params;
+	struct bnx2x_txq_setup_params txq_params;
+	struct bnx2x_rxq_setup_params rxq_params;
+	struct rxq_pause_params pause_params;
+	unsigned long flags;
+};
+
+struct bnx2x_queue_setup_tx_only_params {
+	struct bnx2x_general_setup_params	gen_params;
+	struct bnx2x_txq_setup_params		txq_params;
+	unsigned long				flags;
+	/* index within the tx_only cids of this queue object */
+	u8					cid_index;
+};
+
+struct bnx2x_queue_state_params {
+	struct bnx2x_queue_sp_obj *q_obj;
+
+	/* Current command */
+	enum bnx2x_queue_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_queue_update_params	update;
+		struct bnx2x_queue_setup_params		setup;
+		struct bnx2x_queue_init_params		init;
+		struct bnx2x_queue_setup_tx_only_params	tx_only;
+		struct bnx2x_queue_terminate_params	terminate;
+		struct bnx2x_queue_cfc_del_params	cfc_del;
+	} params;
+};
+
+struct bnx2x_queue_sp_obj {
+	u32		cids[BNX2X_MULTI_TX_COS];
+	u8		cl_id;
+	u8		func_id;
+
+	/*
+	 * Number of traffic classes supported by the queue.
+	 * The primary connection of the queue supports the first traffic
+	 * class. Any further traffic class is supported by a tx-only
+	 * connection.
+	 *
+	 * Therefore max_cos is also the number of valid entries in the
+	 * cids array.
+	 */
+	u8 max_cos;
+	u8 num_tx_only, next_tx_only;
+
+	enum bnx2x_q_state state, next_state;
+
+	/* bits from enum bnx2x_q_type */
+	unsigned long	type;
+
+	/* BNX2X_Q_CMD_XX bits. This object implements the "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long	pending;
+
+	/* Buffer to use as a ramrod data and its mapping */
+	void		*rdata;
+	dma_addr_t	rdata_mapping;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Sets the pending bit according to the requested transition.
+	 */
+	int (*set_pending)(struct bnx2x_queue_sp_obj *o,
+			   struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_queue_sp_obj *o,
+				struct bnx2x_queue_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_queue_sp_obj *o,
+			    enum bnx2x_queue_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp,
+			 struct bnx2x_queue_sp_obj *o,
+			 enum bnx2x_queue_cmd cmd);
+};
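+
+/*
+ * Illustrative queue state change sketch (hypothetical caller; q_obj
+ * is the caller's queue object): sending a synchronous HALT command.
+ *
+ *	struct bnx2x_queue_state_params qsp = {NULL};
+ *
+ *	qsp.q_obj = q_obj;
+ *	qsp.cmd = BNX2X_Q_CMD_HALT;
+ *	__set_bit(RAMROD_COMP_WAIT, &qsp.ramrod_flags);
+ *
+ *	rc = bnx2x_queue_state_change(bp, &qsp);
+ */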
+
+/********************** Function state update *********************************/
+/* Allowed Function states */
+enum bnx2x_func_state {
+	BNX2X_F_STATE_RESET,
+	BNX2X_F_STATE_INITIALIZED,
+	BNX2X_F_STATE_STARTED,
+	BNX2X_F_STATE_TX_STOPPED,
+	BNX2X_F_STATE_MAX,
+};
+
+/* Allowed Function commands */
+enum bnx2x_func_cmd {
+	BNX2X_F_CMD_HW_INIT,
+	BNX2X_F_CMD_START,
+	BNX2X_F_CMD_STOP,
+	BNX2X_F_CMD_HW_RESET,
+	BNX2X_F_CMD_TX_STOP,
+	BNX2X_F_CMD_TX_START,
+	BNX2X_F_CMD_MAX,
+};
+
+struct bnx2x_func_hw_init_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 load_phase;
+};
+
+struct bnx2x_func_hw_reset_params {
+	/* A load phase returned by MCP.
+	 *
+	 * May be:
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON_CHIP
+	 *		FW_MSG_CODE_DRV_LOAD_COMMON
+	 *		FW_MSG_CODE_DRV_LOAD_PORT
+	 *		FW_MSG_CODE_DRV_LOAD_FUNCTION
+	 */
+	u32 reset_phase;
+};
+
+struct bnx2x_func_start_params {
+	/* Multi Function mode:
+	 *	- Single Function
+	 *	- Switch Dependent
+	 *	- Switch Independent
+	 */
+	u16 mf_mode;
+
+	/* Switch Dependent mode outer VLAN tag */
+	u16 sd_vlan_tag;
+
+	/* Function cos mode */
+	u8 network_cos_mode;
+};
+
+struct bnx2x_func_tx_start_params {
+	struct priority_cos traffic_type_to_priority_cos[MAX_TRAFFIC_TYPES];
+	u8 dcb_enabled;
+	u8 dcb_version;
+	u8 dont_add_pri_0_en;
+};
+
+struct bnx2x_func_state_params {
+	struct bnx2x_func_sp_obj *f_obj;
+
+	/* Current command */
+	enum bnx2x_func_cmd cmd;
+
+	/* may have RAMROD_COMP_WAIT set only */
+	unsigned long	ramrod_flags;
+
+	/* Params according to the current command */
+	union {
+		struct bnx2x_func_hw_init_params hw_init;
+		struct bnx2x_func_hw_reset_params hw_reset;
+		struct bnx2x_func_start_params start;
+		struct bnx2x_func_tx_start_params tx_start;
+	} params;
+};
+
+struct bnx2x_func_sp_drv_ops {
+	/* Init tool + runtime initialization:
+	 *      - Common Chip
+	 *      - Common (per Path)
+	 *      - Port
+	 *      - Function phases
+	 */
+	int (*init_hw_cmn_chip)(struct bnx2x *bp);
+	int (*init_hw_cmn)(struct bnx2x *bp);
+	int (*init_hw_port)(struct bnx2x *bp);
+	int (*init_hw_func)(struct bnx2x *bp);
+
+	/* Reset Function HW: Common, Port, Function phases. */
+	void (*reset_hw_cmn)(struct bnx2x *bp);
+	void (*reset_hw_port)(struct bnx2x *bp);
+	void (*reset_hw_func)(struct bnx2x *bp);
+
+	/* Init/Free GUNZIP resources */
+	int (*gunzip_init)(struct bnx2x *bp);
+	void (*gunzip_end)(struct bnx2x *bp);
+
+	/* Prepare/Release FW resources */
+	int (*init_fw)(struct bnx2x *bp);
+	void (*release_fw)(struct bnx2x *bp);
+};
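+
+/*
+ * A minimal sketch (callback names are hypothetical) of wiring the
+ * driver interface into the function object via bnx2x_init_func_obj(),
+ * declared below:
+ *
+ *	static struct bnx2x_func_sp_drv_ops my_func_sp_drv = {
+ *		.init_hw_cmn_chip	= my_init_hw_common_chip,
+ *		.init_hw_cmn		= my_init_hw_common,
+ *		.init_hw_port		= my_init_hw_port,
+ *		.init_hw_func		= my_init_hw_func,
+ *	};
+ *
+ *	bnx2x_init_func_obj(bp, &func_obj, rdata, rdata_mapping,
+ *			    &my_func_sp_drv);
+ */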
+
+struct bnx2x_func_sp_obj {
+	enum bnx2x_func_state	state, next_state;
+
+	/* BNX2X_FUNC_CMD_XX bits. This object implements the "one
+	 * pending" paradigm, but for debug and tracing purposes it's
+	 * more convenient to have different bits for different
+	 * commands.
+	 */
+	unsigned long		pending;
+
+	/* Buffer to use as a ramrod data and its mapping */
+	void			*rdata;
+	dma_addr_t		rdata_mapping;
+
+	/* this mutex ensures that when the pending flag is taken, the next
+	 * ramrod to be sent will be the one that set the pending bit
+	 */
+	struct mutex		one_pending_mutex;
+
+	/* Driver interface */
+	struct bnx2x_func_sp_drv_ops	*drv;
+
+	/**
+	 * Performs one state change according to the given parameters.
+	 *
+	 * @return 0 in case of success and negative value otherwise.
+	 */
+	int (*send_cmd)(struct bnx2x *bp,
+			struct bnx2x_func_state_params *params);
+
+	/**
+	 * Checks that the requested state transition is legal.
+	 */
+	int (*check_transition)(struct bnx2x *bp,
+				struct bnx2x_func_sp_obj *o,
+				struct bnx2x_func_state_params *params);
+
+	/**
+	 * Completes the pending command.
+	 */
+	int (*complete_cmd)(struct bnx2x *bp,
+			    struct bnx2x_func_sp_obj *o,
+			    enum bnx2x_func_cmd cmd);
+
+	int (*wait_comp)(struct bnx2x *bp, struct bnx2x_func_sp_obj *o,
+			 enum bnx2x_func_cmd cmd);
+};
+
+/********************** Interfaces ********************************************/
+/* Queueable objects set */
+union bnx2x_qable_obj {
+	struct bnx2x_vlan_mac_obj vlan_mac;
+};
+/************** Function state update *********/
+void bnx2x_init_func_obj(struct bnx2x *bp,
+			 struct bnx2x_func_sp_obj *obj,
+			 void *rdata, dma_addr_t rdata_mapping,
+			 struct bnx2x_func_sp_drv_ops *drv_iface);
+
+int bnx2x_func_state_change(struct bnx2x *bp,
+			    struct bnx2x_func_state_params *params);
+
+enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
+					   struct bnx2x_func_sp_obj *o);
+/******************* Queue State **************/
+void bnx2x_init_queue_obj(struct bnx2x *bp,
+			  struct bnx2x_queue_sp_obj *obj, u8 cl_id, u32 *cids,
+			  u8 cid_cnt, u8 func_id, void *rdata,
+			  dma_addr_t rdata_mapping, unsigned long type);
+
+int bnx2x_queue_state_change(struct bnx2x *bp,
+			     struct bnx2x_queue_state_params *params);
+
+/********************* VLAN-MAC ****************/
+void bnx2x_init_mac_obj(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_obj *mac_obj,
+			u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			dma_addr_t rdata_mapping, int state,
+			unsigned long *pstate, bnx2x_obj_type type,
+			struct bnx2x_credit_pool_obj *macs_pool);
+
+void bnx2x_init_vlan_obj(struct bnx2x *bp,
+			 struct bnx2x_vlan_mac_obj *vlan_obj,
+			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			 dma_addr_t rdata_mapping, int state,
+			 unsigned long *pstate, bnx2x_obj_type type,
+			 struct bnx2x_credit_pool_obj *vlans_pool);
+
+void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
+			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
+			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
+			     dma_addr_t rdata_mapping, int state,
+			     unsigned long *pstate, bnx2x_obj_type type,
+			     struct bnx2x_credit_pool_obj *macs_pool,
+			     struct bnx2x_credit_pool_obj *vlans_pool);
+
+int bnx2x_config_vlan_mac(struct bnx2x *bp,
+			  struct bnx2x_vlan_mac_ramrod_params *p);
+
+int bnx2x_vlan_mac_move(struct bnx2x *bp,
+			struct bnx2x_vlan_mac_ramrod_params *p,
+			struct bnx2x_vlan_mac_obj *dest_o);
+
+/********************* RX MODE ****************/
+
+void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
+			    struct bnx2x_rx_mode_obj *o);
+
+/**
+ * Send an RX_MODE ramrod according to the provided parameters.
+ *
+ * @param bp
+ * @param p Command parameters
+ *
+ * @return 0 if the operation was successful and there are no pending
+ *         completions, a positive number if there are pending completions,
+ *         negative if there were errors
+ */
+int bnx2x_config_rx_mode(struct bnx2x *bp,
+			 struct bnx2x_rx_mode_ramrod_params *p);
+
+/****************** MULTICASTS ****************/
+
+void bnx2x_init_mcast_obj(struct bnx2x *bp,
+			  struct bnx2x_mcast_obj *mcast_obj,
+			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
+			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
+			  int state, unsigned long *pstate,
+			  bnx2x_obj_type type);
+
+/**
+ * Configure multicast MACs list. May configure a new list
+ * provided in p->mcast_list (BNX2X_MCAST_CMD_ADD), clean up
+ * (BNX2X_MCAST_CMD_DEL) or restore (BNX2X_MCAST_CMD_RESTORE) a current
+ * configuration, or continue to execute the pending commands
+ * (BNX2X_MCAST_CMD_CONT).
+ *
+ * If a previous command is still pending or if the number of MACs to
+ * configure is more than the maximum number of MACs in one command,
+ * the current command will be enqueued to the tail of the
+ * pending commands list.
+ *
+ * @param bp
+ * @param p
+ * @param cmd Command to execute: BNX2X_MCAST_CMD_X
+ *
+ * @return 0 if the operation was successful and there are no pending
+ *         completions, negative if there were errors, positive if there
+ *         are pending completions.
+ */
+int bnx2x_config_mcast(struct bnx2x *bp,
+		       struct bnx2x_mcast_ramrod_params *p, int cmd);
+
+/****************** CREDIT POOL ****************/
+void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
+				struct bnx2x_credit_pool_obj *p, u8 func_id,
+				u8 func_num);
+void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
+				 struct bnx2x_credit_pool_obj *p, u8 func_id,
+				 u8 func_num);
+
+/****************** RSS CONFIGURATION ****************/
+void bnx2x_init_rss_config_obj(struct bnx2x *bp,
+			       struct bnx2x_rss_config_obj *rss_obj,
+			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
+			       void *rdata, dma_addr_t rdata_mapping,
+			       int state, unsigned long *pstate,
+			       bnx2x_obj_type type);
+
+/**
+ * Updates RSS configuration according to provided parameters.
+ *
+ * @param bp
+ * @param p
+ *
+ * @return 0 in case of success
+ */
+int bnx2x_config_rss(struct bnx2x *bp,
+		     struct bnx2x_config_rss_params *p);
+
+/**
+ * Return the current ind_table configuration.
+ *
+ * @param rss_obj
+ * @param ind_table buffer to fill with the current indirection
+ *                  table content. Should be at least
+ *                  T_ETH_INDIRECTION_TABLE_SIZE bytes long.
+ */
+void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
+			     u8 *ind_table);
+
+#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
new file mode 100644
index 0000000..771f680
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -0,0 +1,1598 @@
+/* bnx2x_stats.c: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#include "bnx2x_stats.h"
+#include "bnx2x_cmn.h"
+
+
+/* Statistics */
+
+/*
+ * General service functions
+ */
+
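+/* Hardware/FW statistics are kept as {hi, lo} pairs of u32s. bnx2x_hilo()
+ * folds such a pair into a long: the full 64-bit value on 64-bit kernels,
+ * or just the low 32 bits on 32-bit ones.
+ */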
+static inline long bnx2x_hilo(u32 *hiref)
+{
+	u32 lo = *(hiref + 1);
+#if (BITS_PER_LONG == 64)
+	u32 hi = *hiref;
+
+	return HILO_U64(hi, lo);
+#else
+	return lo;
+#endif
+}
+
+/*
+ * Init service functions
+ */
+
+/* Post the next statistics ramrod. Protect it with the spin in
+ * order to ensure the strict order between statistics ramrods
+ * (each ramrod has a sequence number passed in a
+ * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be
+ * sent in order).
+ */
+static void bnx2x_storm_stats_post(struct bnx2x *bp)
+{
+	if (!bp->stats_pending) {
+		int rc;
+
+		spin_lock_bh(&bp->stats_lock);
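+		/* re-check under the lock: another context may have won the
+		 * race and posted a ramrod since the lockless test above
+		 */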
+
+		if (bp->stats_pending) {
+			spin_unlock_bh(&bp->stats_lock);
+			return;
+		}
+
+		bp->fw_stats_req->hdr.drv_stats_counter =
+			cpu_to_le16(bp->stats_counter++);
+
+		DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+			bp->fw_stats_req->hdr.drv_stats_counter);
+
+
+		/* send FW stats ramrod */
+		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
+				   U64_HI(bp->fw_stats_req_mapping),
+				   U64_LO(bp->fw_stats_req_mapping),
+				   NONE_CONNECTION_TYPE);
+		if (rc == 0)
+			bp->stats_pending = 1;
+
+		spin_unlock_bh(&bp->stats_lock);
+	}
+}
+
+static void bnx2x_hw_stats_post(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	*stats_comp = DMAE_COMP_VAL;
+	if (CHIP_REV_IS_SLOW(bp))
+		return;
+
+	/* loader: statistics sub-commands were queued in the slowpath
+	 * dmae[] array; kick them off through the DMAE loader command so
+	 * they execute back-to-back without further CPU involvement
+	 */
+	if (bp->executer_idx) {
+		int loader_idx = PMF_DMAE_C(bp);
+		u32 opcode =  bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+						 true, DMAE_COMP_GRC);
+		opcode = bnx2x_dmae_opcode_clr_src_reset(opcode);
+
+		memset(dmae, 0, sizeof(struct dmae_command));
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
+		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
+				     sizeof(struct dmae_command) *
+				     (loader_idx + 1)) >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct dmae_command) >> 2;
+		if (CHIP_IS_E1(bp))
+			dmae->len--;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		*stats_comp = 0;
+		bnx2x_post_dmae(bp, dmae, loader_idx);
+
+	} else if (bp->func_stx) {
+		*stats_comp = 0;
+		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
+	}
+}
+
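+/* Poll for the DMAE completion marker for up to ~10ms; logs an error on
+ * timeout. Always returns 1.
+ */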
+static int bnx2x_stats_comp(struct bnx2x *bp)
+{
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+	int cnt = 10;
+
+	might_sleep();
+	while (*stats_comp != DMAE_COMP_VAL) {
+		if (!cnt) {
+			BNX2X_ERR("timeout waiting for stats finished\n");
+			break;
+		}
+		cnt--;
+		usleep_range(1000, 1000);
+	}
+	return 1;
+}
+
+/*
+ * Statistics service functions
+ */
+
+static void bnx2x_stats_pmf_update(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
+	dmae->src_addr_lo = bp->port.port_stx >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+	dmae->len = DMAE_LEN32_RD_MAX;
+	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+	dmae->comp_addr_hi = 0;
+	dmae->comp_val = 1;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
+				   DMAE_LEN32_RD_MAX * 4);
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
+				   DMAE_LEN32_RD_MAX * 4);
+	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_port_stats_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	int port = BP_PORT(bp);
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 mac_addr;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->link_vars.link_up || !bp->port.pmf) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	/* MCP: DMAE the host port/function stats into the shmem areas that
+	 * the management FW reads
+	 */
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+				    true, DMAE_COMP_GRC);
+
+	if (bp->port.port_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+		dmae->dst_addr_lo = bp->port.port_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_port_stats) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	if (bp->func_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+		dmae->dst_addr_lo = bp->func_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_func_stats) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	/* MAC: read the MAC hardware statistics (GRC -> host memory) */
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+				   true, DMAE_COMP_GRC);
+
+	/* EMAC is special */
+	if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
+		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
+
+		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
+		dmae->len = 1;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (mac_addr +
+				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
+			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
+			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
+		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	} else {
+		u32 tx_src_addr_lo, rx_src_addr_lo;
+		u16 rx_len, tx_len;
+
+		/* configure the params according to MAC type */
+		switch (bp->link_vars.mac_type) {
+		case MAC_TYPE_BMAC:
+			mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
+					   NIG_REG_INGRESS_BMAC0_MEM);
+
+			/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
+			   BIGMAC_REGISTER_TX_STAT_GTBYT */
+			if (CHIP_IS_E1x(bp)) {
+				tx_src_addr_lo = (mac_addr +
+					BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+				tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
+					  BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
+				rx_src_addr_lo = (mac_addr +
+					BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+				rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
+					  BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
+			} else {
+				tx_src_addr_lo = (mac_addr +
+					BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+				tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT -
+					  BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2;
+				rx_src_addr_lo = (mac_addr +
+					BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+				rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ -
+					  BIGMAC2_REGISTER_RX_STAT_GR64) >> 2;
+			}
+			break;
+
+		case MAC_TYPE_UMAC: /* handled by MSTAT */
+		case MAC_TYPE_XMAC: /* handled by MSTAT */
+		default:
+			mac_addr = port ? GRCBASE_MSTAT1 : GRCBASE_MSTAT0;
+			tx_src_addr_lo = (mac_addr +
+					  MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2;
+			rx_src_addr_lo = (mac_addr +
+					  MSTAT_REG_RX_STAT_GR64_LO) >> 2;
+			tx_len = sizeof(bp->slowpath->
+					mac_stats.mstat_stats.stats_tx) >> 2;
+			rx_len = sizeof(bp->slowpath->
+					mac_stats.mstat_stats.stats_rx) >> 2;
+			break;
+		}
+
+		/* TX stats */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = tx_src_addr_lo;
+		dmae->src_addr_hi = 0;
+		dmae->len = tx_len;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		/* RX stats */
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_hi = 0;
+		dmae->src_addr_lo = rx_src_addr_lo;
+		dmae->dst_addr_lo =
+			U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+		dmae->dst_addr_hi =
+			U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2));
+		dmae->len = rx_len;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
+	/* NIG: read the egress packet counters and BRB discard statistics
+	 * from the NIG block
+	 */
+	if (!CHIP_IS_E3(bp)) {
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
+					    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt0_lo));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt0_lo));
+		dmae->len = (2*sizeof(u32)) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode = opcode;
+		dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
+					    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
+		dmae->src_addr_hi = 0;
+		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt1_lo));
+		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
+				offsetof(struct nig_stats, egress_mac_pkt1_lo));
+		dmae->len = (2*sizeof(u32)) >> 2;
+		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+		dmae->comp_addr_hi = 0;
+		dmae->comp_val = 1;
+	}
+
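+	/* the last command in the chain completes to host memory so that
+	 * bnx2x_stats_comp() can poll for the end of the whole sequence
+	 */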
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+						 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
+				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
+	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
+
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+}
+
+static void bnx2x_func_stats_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->func_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+					 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+	dmae->dst_addr_lo = bp->func_stx >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = sizeof(struct host_func_stats) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+}
+
+static void bnx2x_stats_start(struct bnx2x *bp)
+{
+	if (bp->port.pmf)
+		bnx2x_port_stats_init(bp);
+
+	else if (bp->func_stx)
+		bnx2x_func_stats_init(bp);
+
+	bnx2x_hw_stats_post(bp);
+	bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_stats_pmf_start(struct bnx2x *bp)
+{
+	bnx2x_stats_comp(bp);
+	bnx2x_stats_pmf_update(bp);
+	bnx2x_stats_start(bp);
+}
+
+static void bnx2x_stats_restart(struct bnx2x *bp)
+{
+	bnx2x_stats_comp(bp);
+	bnx2x_stats_start(bp);
+}
+
+static void bnx2x_bmac_stats_update(struct bnx2x *bp)
+{
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct {
+		u32 lo;
+		u32 hi;
+	} diff;
+
+	if (CHIP_IS_E1x(bp)) {
+		struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats);
+
+		/* the macros below will use "bmac1_stats" type */
+		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+		UPDATE_STAT64(tx_stat_gt127,
+				tx_stat_etherstatspkts65octetsto127octets);
+		UPDATE_STAT64(tx_stat_gt255,
+				tx_stat_etherstatspkts128octetsto255octets);
+		UPDATE_STAT64(tx_stat_gt511,
+				tx_stat_etherstatspkts256octetsto511octets);
+		UPDATE_STAT64(tx_stat_gt1023,
+				tx_stat_etherstatspkts512octetsto1023octets);
+		UPDATE_STAT64(tx_stat_gt1518,
+				tx_stat_etherstatspkts1024octetsto1522octets);
+		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+		UPDATE_STAT64(tx_stat_gterr,
+				tx_stat_dot3statsinternalmactransmiterrors);
+		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+	} else {
+		struct bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats);
+
+		/* the macros below will use "bmac2_stats" type */
+		UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
+		UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
+		UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
+		UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
+		UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
+		UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
+		UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
+		UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf);
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
+		UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
+		UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
+		UPDATE_STAT64(tx_stat_gt127,
+				tx_stat_etherstatspkts65octetsto127octets);
+		UPDATE_STAT64(tx_stat_gt255,
+				tx_stat_etherstatspkts128octetsto255octets);
+		UPDATE_STAT64(tx_stat_gt511,
+				tx_stat_etherstatspkts256octetsto511octets);
+		UPDATE_STAT64(tx_stat_gt1023,
+				tx_stat_etherstatspkts512octetsto1023octets);
+		UPDATE_STAT64(tx_stat_gt1518,
+				tx_stat_etherstatspkts1024octetsto1522octets);
+		UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047);
+		UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095);
+		UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216);
+		UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383);
+		UPDATE_STAT64(tx_stat_gterr,
+				tx_stat_dot3statsinternalmactransmiterrors);
+		UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+	}
+
+	estats->pause_frames_received_hi =
+				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+	estats->pause_frames_received_lo =
+				pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+	estats->pause_frames_sent_hi =
+				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+	estats->pause_frames_sent_lo =
+				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_mstat_stats_update(struct bnx2x *bp)
+{
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+	struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats);
+
+	ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets);
+	ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors);
+	ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts);
+	ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong);
+	ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments);
+	ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived);
+	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered);
+	ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf);
+	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
+	ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+
+	ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
+	ADD_STAT64(stats_tx.tx_gt127,
+			tx_stat_etherstatspkts65octetsto127octets);
+	ADD_STAT64(stats_tx.tx_gt255,
+			tx_stat_etherstatspkts128octetsto255octets);
+	ADD_STAT64(stats_tx.tx_gt511,
+			tx_stat_etherstatspkts256octetsto511octets);
+	ADD_STAT64(stats_tx.tx_gt1023,
+			tx_stat_etherstatspkts512octetsto1023octets);
+	ADD_STAT64(stats_tx.tx_gt1518,
+			tx_stat_etherstatspkts1024octetsto1522octets);
+	ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047);
+
+	ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095);
+	ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216);
+	ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383);
+
+	ADD_STAT64(stats_tx.tx_gterr,
+			tx_stat_dot3statsinternalmactransmiterrors);
+	ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
+
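+	/* fold the per-size-bucket TX counters into the aggregate
+	 * etherstats 1024-1522 and over-1522 octet counters
+	 */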
+	ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
+	       new->stats_tx.tx_gt1518_hi,
+	       estats->etherstatspkts1024octetsto1522octets_lo,
+	       new->stats_tx.tx_gt1518_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt2047_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt2047_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt4095_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt4095_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt9216_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt9216_lo);
+
+	ADD_64(estats->etherstatspktsover1522octets_hi,
+	       new->stats_tx.tx_gt16383_hi,
+	       estats->etherstatspktsover1522octets_lo,
+	       new->stats_tx.tx_gt16383_lo);
+
+	estats->pause_frames_received_hi =
+				pstats->mac_stx[1].rx_stat_mac_xpf_hi;
+	estats->pause_frames_received_lo =
+				pstats->mac_stx[1].rx_stat_mac_xpf_lo;
+
+	estats->pause_frames_sent_hi =
+				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
+	estats->pause_frames_sent_lo =
+				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+}
+
+static void bnx2x_emac_stats_update(struct bnx2x *bp)
+{
+	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+
+	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
+	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
+	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
+	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
+	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
+	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
+	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
+	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
+	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
+	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
+	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
+	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
+	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
+	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
+
+	estats->pause_frames_received_hi =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
+	estats->pause_frames_received_lo =
+			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
+	ADD_64(estats->pause_frames_received_hi,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
+	       estats->pause_frames_received_lo,
+	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
+
+	estats->pause_frames_sent_hi =
+			pstats->mac_stx[1].tx_stat_outxonsent_hi;
+	estats->pause_frames_sent_lo =
+			pstats->mac_stx[1].tx_stat_outxonsent_lo;
+	ADD_64(estats->pause_frames_sent_hi,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
+	       estats->pause_frames_sent_lo,
+	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
+}
+
+static int bnx2x_hw_stats_update(struct bnx2x *bp)
+{
+	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
+	struct nig_stats *old = &(bp->port.old_nig_stats);
+	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct {
+		u32 lo;
+		u32 hi;
+	} diff;
+
+	switch (bp->link_vars.mac_type) {
+	case MAC_TYPE_BMAC:
+		bnx2x_bmac_stats_update(bp);
+		break;
+
+	case MAC_TYPE_EMAC:
+		bnx2x_emac_stats_update(bp);
+		break;
+
+	case MAC_TYPE_UMAC:
+	case MAC_TYPE_XMAC:
+		bnx2x_mstat_stats_update(bp);
+		break;
+
+	case MAC_TYPE_NONE: /* unreached */
+		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
+		return -1;
+
+	default: /* unreached */
+		BNX2X_ERR("Unknown MAC type\n");
+	}
+
+	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
+		      new->brb_discard - old->brb_discard);
+	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
+		      new->brb_truncate - old->brb_truncate);
+
+	if (!CHIP_IS_E3(bp)) {
+		UPDATE_STAT64_NIG(egress_mac_pkt0,
+					etherstatspkts1024octetsto1522octets);
+		UPDATE_STAT64_NIG(egress_mac_pkt1,
+					etherstatspktsover1522octets);
+	}
+
+	memcpy(old, new, sizeof(struct nig_stats));
+
+	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
+	       sizeof(struct mac_stx));
+	estats->brb_drop_hi = pstats->brb_drop_hi;
+	estats->brb_drop_lo = pstats->brb_drop_lo;
+
+	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+
+	if (!BP_NOMCP(bp)) {
+		u32 nig_timer_max =
+			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+		if (nig_timer_max != estats->nig_timer_max) {
+			estats->nig_timer_max = nig_timer_max;
+			BNX2X_ERR("NIG timer max (%u)\n",
+				  estats->nig_timer_max);
+		}
+	}
+
+	return 0;
+}
+
+static int bnx2x_storm_stats_update(struct bnx2x *bp)
+{
+	struct tstorm_per_port_stats *tport =
+				&bp->fw_stats_data->port.tstorm_port_statistics;
+	struct tstorm_per_pf_stats *tfunc =
+				&bp->fw_stats_data->pf.tstorm_pf_statistics;
+	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
+	int i;
+	u16 cur_stats_counter;
+
+	/* Make sure we use the value of the counter
+	 * used for sending the last stats ramrod.
+	 */
+	spin_lock_bh(&bp->stats_lock);
+	cur_stats_counter = bp->stats_counter - 1;
+	spin_unlock_bh(&bp->stats_lock);
+
+	/* are storm stats valid? */
+	if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
+		   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->xstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
+		   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->ustats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
+		   "  cstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->cstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
+		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
+		   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
+		   le16_to_cpu(counters->tstats_counter), bp->stats_counter);
+		return -EAGAIN;
+	}
+
+	memcpy(&(fstats->total_bytes_received_hi),
+	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
+	       sizeof(struct host_func_stats) - 2*sizeof(u32));
+	estats->error_bytes_received_hi = 0;
+	estats->error_bytes_received_lo = 0;
+	estats->etherstatsoverrsizepkts_hi = 0;
+	estats->etherstatsoverrsizepkts_lo = 0;
+	estats->no_buff_discard_hi = 0;
+	estats->no_buff_discard_lo = 0;
+	estats->total_tpa_aggregations_hi = 0;
+	estats->total_tpa_aggregations_lo = 0;
+	estats->total_tpa_aggregated_frames_hi = 0;
+	estats->total_tpa_aggregated_frames_lo = 0;
+	estats->total_tpa_bytes_hi = 0;
+	estats->total_tpa_bytes_lo = 0;
+
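+	/* aggregate the per-queue storm statistics into the per-function
+	 * (fstats) and driver (estats) counters
+	 */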
+	for_each_eth_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+		struct tstorm_per_queue_stats *tclient =
+			&bp->fw_stats_data->queue_stats[i].
+			tstorm_queue_statistics;
+		struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient;
+		struct ustorm_per_queue_stats *uclient =
+			&bp->fw_stats_data->queue_stats[i].
+			ustorm_queue_statistics;
+		struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient;
+		struct xstorm_per_queue_stats *xclient =
+			&bp->fw_stats_data->queue_stats[i].
+			xstorm_queue_statistics;
+		struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
+		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+		u32 diff;
+
+		DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
+				    "bcast_sent 0x%x mcast_sent 0x%x\n",
+		   i, xclient->ucast_pkts_sent,
+		   xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
+
+		DP(BNX2X_MSG_STATS, "---------------\n");
+
+		qstats->total_broadcast_bytes_received_hi =
+			le32_to_cpu(tclient->rcv_bcast_bytes.hi);
+		qstats->total_broadcast_bytes_received_lo =
+			le32_to_cpu(tclient->rcv_bcast_bytes.lo);
+
+		qstats->total_multicast_bytes_received_hi =
+			le32_to_cpu(tclient->rcv_mcast_bytes.hi);
+		qstats->total_multicast_bytes_received_lo =
+			le32_to_cpu(tclient->rcv_mcast_bytes.lo);
+
+		qstats->total_unicast_bytes_received_hi =
+			le32_to_cpu(tclient->rcv_ucast_bytes.hi);
+		qstats->total_unicast_bytes_received_lo =
+			le32_to_cpu(tclient->rcv_ucast_bytes.lo);
+
+		/*
+		 * total_bytes_received is the sum of the unicast,
+		 * multicast and broadcast byte counts
+		 */
+		qstats->total_bytes_received_hi =
+			qstats->total_broadcast_bytes_received_hi;
+		qstats->total_bytes_received_lo =
+			qstats->total_broadcast_bytes_received_lo;
+
+		ADD_64(qstats->total_bytes_received_hi,
+		       qstats->total_multicast_bytes_received_hi,
+		       qstats->total_bytes_received_lo,
+		       qstats->total_multicast_bytes_received_lo);
+
+		ADD_64(qstats->total_bytes_received_hi,
+		       qstats->total_unicast_bytes_received_hi,
+		       qstats->total_bytes_received_lo,
+		       qstats->total_unicast_bytes_received_lo);
+
+		qstats->valid_bytes_received_hi =
+					qstats->total_bytes_received_hi;
+		qstats->valid_bytes_received_lo =
+					qstats->total_bytes_received_lo;
+
+		UPDATE_EXTEND_TSTAT(rcv_ucast_pkts,
+					total_unicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_mcast_pkts,
+					total_multicast_packets_received);
+		UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
+					etherstatsoverrsizepkts);
+		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+
+		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
+					total_unicast_packets_received);
+		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
+					total_multicast_packets_received);
+		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
+					total_broadcast_packets_received);
+		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
+		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+		qstats->total_broadcast_bytes_transmitted_hi =
+			le32_to_cpu(xclient->bcast_bytes_sent.hi);
+		qstats->total_broadcast_bytes_transmitted_lo =
+			le32_to_cpu(xclient->bcast_bytes_sent.lo);
+
+		qstats->total_multicast_bytes_transmitted_hi =
+			le32_to_cpu(xclient->mcast_bytes_sent.hi);
+		qstats->total_multicast_bytes_transmitted_lo =
+			le32_to_cpu(xclient->mcast_bytes_sent.lo);
+
+		qstats->total_unicast_bytes_transmitted_hi =
+			le32_to_cpu(xclient->ucast_bytes_sent.hi);
+		qstats->total_unicast_bytes_transmitted_lo =
+			le32_to_cpu(xclient->ucast_bytes_sent.lo);
+		/*
+		 * total_bytes_transmitted is the sum of the unicast,
+		 * multicast and broadcast byte counts
+		 */
+		qstats->total_bytes_transmitted_hi =
+				qstats->total_unicast_bytes_transmitted_hi;
+		qstats->total_bytes_transmitted_lo =
+				qstats->total_unicast_bytes_transmitted_lo;
+
+		ADD_64(qstats->total_bytes_transmitted_hi,
+		       qstats->total_broadcast_bytes_transmitted_hi,
+		       qstats->total_bytes_transmitted_lo,
+		       qstats->total_broadcast_bytes_transmitted_lo);
+
+		ADD_64(qstats->total_bytes_transmitted_hi,
+		       qstats->total_multicast_bytes_transmitted_hi,
+		       qstats->total_bytes_transmitted_lo,
+		       qstats->total_multicast_bytes_transmitted_lo);
+
+		UPDATE_EXTEND_XSTAT(ucast_pkts_sent,
+					total_unicast_packets_transmitted);
+		UPDATE_EXTEND_XSTAT(mcast_pkts_sent,
+					total_multicast_packets_transmitted);
+		UPDATE_EXTEND_XSTAT(bcast_pkts_sent,
+					total_broadcast_packets_transmitted);
+
+		UPDATE_EXTEND_TSTAT(checksum_discard,
+				    total_packets_received_checksum_discarded);
+		UPDATE_EXTEND_TSTAT(ttl0_discard,
+				    total_packets_received_ttl0_discarded);
+
+		UPDATE_EXTEND_XSTAT(error_drop_pkts,
+				    total_transmitted_dropped_packets_error);
+
+		/* TPA aggregations completed */
+		UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
+		/* Number of network frames aggregated by TPA */
+		UPDATE_EXTEND_USTAT(coalesced_pkts,
+				    total_tpa_aggregated_frames);
+		/* Total number of bytes in completed TPA aggregations */
+		qstats->total_tpa_bytes_lo =
+			le32_to_cpu(uclient->coalesced_bytes.lo);
+		qstats->total_tpa_bytes_hi =
+			le32_to_cpu(uclient->coalesced_bytes.hi);
+
+		/* TPA stats per-function */
+		ADD_64(estats->total_tpa_aggregations_hi,
+		       qstats->total_tpa_aggregations_hi,
+		       estats->total_tpa_aggregations_lo,
+		       qstats->total_tpa_aggregations_lo);
+		ADD_64(estats->total_tpa_aggregated_frames_hi,
+		       qstats->total_tpa_aggregated_frames_hi,
+		       estats->total_tpa_aggregated_frames_lo,
+		       qstats->total_tpa_aggregated_frames_lo);
+		ADD_64(estats->total_tpa_bytes_hi,
+		       qstats->total_tpa_bytes_hi,
+		       estats->total_tpa_bytes_lo,
+		       qstats->total_tpa_bytes_lo);
+
+		ADD_64(fstats->total_bytes_received_hi,
+		       qstats->total_bytes_received_hi,
+		       fstats->total_bytes_received_lo,
+		       qstats->total_bytes_received_lo);
+		ADD_64(fstats->total_bytes_transmitted_hi,
+		       qstats->total_bytes_transmitted_hi,
+		       fstats->total_bytes_transmitted_lo,
+		       qstats->total_bytes_transmitted_lo);
+		ADD_64(fstats->total_unicast_packets_received_hi,
+		       qstats->total_unicast_packets_received_hi,
+		       fstats->total_unicast_packets_received_lo,
+		       qstats->total_unicast_packets_received_lo);
+		ADD_64(fstats->total_multicast_packets_received_hi,
+		       qstats->total_multicast_packets_received_hi,
+		       fstats->total_multicast_packets_received_lo,
+		       qstats->total_multicast_packets_received_lo);
+		ADD_64(fstats->total_broadcast_packets_received_hi,
+		       qstats->total_broadcast_packets_received_hi,
+		       fstats->total_broadcast_packets_received_lo,
+		       qstats->total_broadcast_packets_received_lo);
+		ADD_64(fstats->total_unicast_packets_transmitted_hi,
+		       qstats->total_unicast_packets_transmitted_hi,
+		       fstats->total_unicast_packets_transmitted_lo,
+		       qstats->total_unicast_packets_transmitted_lo);
+		ADD_64(fstats->total_multicast_packets_transmitted_hi,
+		       qstats->total_multicast_packets_transmitted_hi,
+		       fstats->total_multicast_packets_transmitted_lo,
+		       qstats->total_multicast_packets_transmitted_lo);
+		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
+		       qstats->total_broadcast_packets_transmitted_hi,
+		       fstats->total_broadcast_packets_transmitted_lo,
+		       qstats->total_broadcast_packets_transmitted_lo);
+		ADD_64(fstats->valid_bytes_received_hi,
+		       qstats->valid_bytes_received_hi,
+		       fstats->valid_bytes_received_lo,
+		       qstats->valid_bytes_received_lo);
+
+		ADD_64(estats->etherstatsoverrsizepkts_hi,
+		       qstats->etherstatsoverrsizepkts_hi,
+		       estats->etherstatsoverrsizepkts_lo,
+		       qstats->etherstatsoverrsizepkts_lo);
+		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
+		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+	}
+
+	ADD_64(fstats->total_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       fstats->total_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
+
+	ADD_64(fstats->total_bytes_received_hi,
+	       tfunc->rcv_error_bytes.hi,
+	       fstats->total_bytes_received_lo,
+	       tfunc->rcv_error_bytes.lo);
+
+	memcpy(estats, &(fstats->total_bytes_received_hi),
+	       sizeof(struct host_func_stats) - 2*sizeof(u32));
+
+	ADD_64(estats->error_bytes_received_hi,
+	       tfunc->rcv_error_bytes.hi,
+	       estats->error_bytes_received_lo,
+	       tfunc->rcv_error_bytes.lo);
+
+	ADD_64(estats->etherstatsoverrsizepkts_hi,
+	       estats->rx_stat_dot3statsframestoolong_hi,
+	       estats->etherstatsoverrsizepkts_lo,
+	       estats->rx_stat_dot3statsframestoolong_lo);
+	ADD_64(estats->error_bytes_received_hi,
+	       estats->rx_stat_ifhcinbadoctets_hi,
+	       estats->error_bytes_received_lo,
+	       estats->rx_stat_ifhcinbadoctets_lo);
+
+	if (bp->port.pmf) {
+		estats->mac_filter_discard =
+				le32_to_cpu(tport->mac_filter_discard);
+		estats->mf_tag_discard =
+				le32_to_cpu(tport->mf_tag_discard);
+		estats->brb_truncate_discard =
+				le32_to_cpu(tport->brb_truncate_discard);
+		estats->mac_discard = le32_to_cpu(tport->mac_discard);
+	}
+
+	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
+
+	bp->stats_pending = 0;
+
+	return 0;
+}
+
+static void bnx2x_net_stats_update(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	struct net_device_stats *nstats = &bp->dev->stats;
+	unsigned long tmp;
+	int i;
+
+	nstats->rx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
+
+	nstats->tx_packets =
+		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
+		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
+
+	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
+
+	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
+
+	tmp = estats->mac_discard;
+	for_each_rx_queue(bp, i)
+		tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
+	nstats->rx_dropped = tmp;
+
+	nstats->tx_dropped = 0;
+
+	nstats->multicast =
+		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
+
+	nstats->collisions =
+		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
+
+	nstats->rx_length_errors =
+		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
+		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
+	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
+				 bnx2x_hilo(&estats->brb_truncate_hi);
+	nstats->rx_crc_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
+	nstats->rx_frame_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
+	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
+	nstats->rx_missed_errors = 0;
+
+	nstats->rx_errors = nstats->rx_length_errors +
+			    nstats->rx_over_errors +
+			    nstats->rx_crc_errors +
+			    nstats->rx_frame_errors +
+			    nstats->rx_fifo_errors +
+			    nstats->rx_missed_errors;
+
+	nstats->tx_aborted_errors =
+		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
+		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
+	nstats->tx_carrier_errors =
+		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
+	nstats->tx_fifo_errors = 0;
+	nstats->tx_heartbeat_errors = 0;
+	nstats->tx_window_errors = 0;
+
+	nstats->tx_errors = nstats->tx_aborted_errors +
+			    nstats->tx_carrier_errors +
+	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
+}
+
+static void bnx2x_drv_stats_update(struct bnx2x *bp)
+{
+	struct bnx2x_eth_stats *estats = &bp->eth_stats;
+	int i;
+
+	estats->driver_xoff = 0;
+	estats->rx_err_discard_pkt = 0;
+	estats->rx_skb_alloc_failed = 0;
+	estats->hw_csum_err = 0;
+	for_each_queue(bp, i) {
+		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+
+		estats->driver_xoff += qstats->driver_xoff;
+		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
+		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
+		estats->hw_csum_err += qstats->hw_csum_err;
+	}
+}
+
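+/* Management FW debug hook: statistics collection may be frozen through
+ * the edebug driver interface in shmem2.
+ */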
+static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
+{
+	u32 val;
+
+	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
+		val = SHMEM2_RD(bp, edebug_driver_if[1]);
+
+		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
+			return true;
+	}
+
+	return false;
+}
+
+static void bnx2x_stats_update(struct bnx2x *bp)
+{
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	if (bnx2x_edebug_stats_stopped(bp))
+		return;
+
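+	/* the previous DMAE round has not completed yet - try again on the
+	 * next timer tick
+	 */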
+	if (*stats_comp != DMAE_COMP_VAL)
+		return;
+
+	if (bp->port.pmf)
+		bnx2x_hw_stats_update(bp);
+
+	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
+		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
+		bnx2x_panic();
+		return;
+	}
+
+	bnx2x_net_stats_update(bp);
+	bnx2x_drv_stats_update(bp);
+
+	if (netif_msg_timer(bp)) {
+		struct bnx2x_eth_stats *estats = &bp->eth_stats;
+		int i, cos;
+
+		netdev_dbg(bp->dev, "brb drops %u  brb truncate %u\n",
+		       estats->brb_drop_lo, estats->brb_truncate_lo);
+
+		for_each_eth_queue(bp, i) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
+					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
+			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+			       fp->rx_comp_cons),
+			       le16_to_cpu(*fp->rx_cons_sb),
+			       bnx2x_hilo(&qstats->
+					  total_unicast_packets_received_hi),
+			       fp->rx_calls, fp->rx_pkt);
+		}
+
+		for_each_eth_queue(bp, i) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_fp_txdata *txdata;
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+			struct netdev_queue *txq;
+
+			printk(KERN_DEBUG "%s: tx pkt(%lu) (Xoff events %u)",
+				fp->name, bnx2x_hilo(
+				&qstats->total_unicast_packets_transmitted_hi),
+				qstats->driver_xoff);
+
+			for_each_cos_in_tx_queue(fp, cos) {
+				txdata = &fp->txdata[cos];
+				txq = netdev_get_tx_queue(bp->dev,
+						FP_COS_TO_TXQ(fp, cos));
+
+				printk(KERN_DEBUG "%d: tx avail(%4u)"
+				       "  *tx_cons_sb(%u)"
+				       "  tx calls (%lu)"
+				       "  %s\n",
+				       cos,
+				       bnx2x_tx_avail(bp, txdata),
+				       le16_to_cpu(*txdata->tx_cons_sb),
+				       txdata->tx_pkt,
+				       (netif_tx_queue_stopped(txq) ?
+					"Xoff" : "Xon")
+				       );
+			}
+		}
+	}
+
+	bnx2x_hw_stats_post(bp);
+	bnx2x_storm_stats_post(bp);
+}
+
+static void bnx2x_port_stats_stop(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 opcode;
+	int loader_idx = PMF_DMAE_C(bp);
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	bp->executer_idx = 0;
+
+	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0);
+
+	if (bp->port.port_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		if (bp->func_stx)
+			dmae->opcode = bnx2x_dmae_opcode_add_comp(
+						opcode, DMAE_COMP_GRC);
+		else
+			dmae->opcode = bnx2x_dmae_opcode_add_comp(
+						opcode, DMAE_COMP_PCI);
+
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+		dmae->dst_addr_lo = bp->port.port_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_port_stats) >> 2;
+		if (bp->func_stx) {
+			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
+			dmae->comp_addr_hi = 0;
+			dmae->comp_val = 1;
+		} else {
+			dmae->comp_addr_lo =
+				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+			dmae->comp_addr_hi =
+				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+			dmae->comp_val = DMAE_COMP_VAL;
+
+			*stats_comp = 0;
+		}
+	}
+
+	if (bp->func_stx) {
+
+		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+		dmae->opcode =
+			bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
+		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
+		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
+		dmae->dst_addr_lo = bp->func_stx >> 2;
+		dmae->dst_addr_hi = 0;
+		dmae->len = sizeof(struct host_func_stats) >> 2;
+		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+		dmae->comp_val = DMAE_COMP_VAL;
+
+		*stats_comp = 0;
+	}
+}
+
+static void bnx2x_stats_stop(struct bnx2x *bp)
+{
+	int update = 0;
+
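+	/* wait for any in-flight DMAE, take one final statistics update
+	 * and, if this function is the PMF, push the last port stats
+	 * snapshot to shmem for the MCP
+	 */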
+	bnx2x_stats_comp(bp);
+
+	if (bp->port.pmf)
+		update = (bnx2x_hw_stats_update(bp) == 0);
+
+	update |= (bnx2x_storm_stats_update(bp) == 0);
+
+	if (update) {
+		bnx2x_net_stats_update(bp);
+
+		if (bp->port.pmf)
+			bnx2x_port_stats_stop(bp);
+
+		bnx2x_hw_stats_post(bp);
+		bnx2x_stats_comp(bp);
+	}
+}
+
+static void bnx2x_stats_do_nothing(struct bnx2x *bp)
+{
+}
+
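+/* Statistics state machine: indexed by [current state][event], each entry
+ * names the action to perform and the next state to enter.
+ */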
+static const struct {
+	void (*action)(struct bnx2x *bp);
+	enum bnx2x_stats_state next_state;
+} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
+/* state	event	*/
+{
+/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
+/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
+/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
+/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
+},
+{
+/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
+/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
+/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
+/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
+}
+};
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
+{
+	enum bnx2x_stats_state state;
+
+	if (unlikely(bp->panic))
+		return;
+	bnx2x_stats_stm[bp->stats_state][event].action(bp);
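+	/* the action above runs outside stats_lock; only the state
+	 * transition itself is serialized
+	 */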
+	spin_lock_bh(&bp->stats_lock);
+	state = bp->stats_state;
+	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
+	spin_unlock_bh(&bp->stats_lock);
+
+	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
+		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
+		   state, event, bp->stats_state);
+}
+
+static void bnx2x_port_stats_base_init(struct bnx2x *bp)
+{
+	struct dmae_command *dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->port.pmf || !bp->port.port_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+
+	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
+					 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
+	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
+	dmae->dst_addr_lo = bp->port.port_stx >> 2;
+	dmae->dst_addr_hi = 0;
+	dmae->len = sizeof(struct host_port_stats) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+static void bnx2x_func_stats_base_init(struct bnx2x *bp)
+{
+	int vn, vn_max = IS_MF(bp) ? E1HVN_MAX : E1VN_MAX;
+	u32 func_stx;
+
+	/* sanity */
+	if (!bp->port.pmf || !bp->func_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	/* save our func_stx */
+	func_stx = bp->func_stx;
+
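+	/* initialize the per-function stats area of every vnic on this
+	 * port, temporarily pointing func_stx at each one in turn
+	 */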
+	for (vn = VN_0; vn < vn_max; vn++) {
+		int mb_idx = CHIP_IS_E1x(bp) ? 2*vn + BP_PORT(bp) : vn;
+
+		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+		bnx2x_func_stats_init(bp);
+		bnx2x_hw_stats_post(bp);
+		bnx2x_stats_comp(bp);
+	}
+
+	/* restore our func_stx */
+	bp->func_stx = func_stx;
+}
+
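+/* Read the current function statistics back from shmem into
+ * func_stats_base, so that storm updates start from the saved values.
+ */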
+static void bnx2x_func_stats_base_update(struct bnx2x *bp)
+{
+	struct dmae_command *dmae = &bp->stats_dmae;
+	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
+
+	/* sanity */
+	if (!bp->func_stx) {
+		BNX2X_ERR("BUG!\n");
+		return;
+	}
+
+	bp->executer_idx = 0;
+	memset(dmae, 0, sizeof(struct dmae_command));
+
+	dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
+					 true, DMAE_COMP_PCI);
+	dmae->src_addr_lo = bp->func_stx >> 2;
+	dmae->src_addr_hi = 0;
+	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
+	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
+	dmae->len = sizeof(struct host_func_stats) >> 2;
+	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
+	dmae->comp_val = DMAE_COMP_VAL;
+
+	*stats_comp = 0;
+	bnx2x_hw_stats_post(bp);
+	bnx2x_stats_comp(bp);
+}
+
+/**
+ * Prepare the statistics ramrod data once, so that later we only have
+ * to increment the statistics counter and send the ramrod each time.
+ *
+ * @param bp
+ */
+static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
+{
+	int i;
+	struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
+
+	dma_addr_t cur_data_offset;
+	struct stats_query_entry *cur_query_entry;
+
+	stats_hdr->cmd_num = bp->fw_stats_num;
+	stats_hdr->drv_stats_counter = 0;
+
+	/* the storm_counters struct contains the counters of completed
+	 * statistics requests per storm, which are incremented by the FW
+	 * each time it completes handling a statistics ramrod. We check
+	 * these counters in the timer handler and discard stale
+	 * (statistics) ramrod completions.
+	 */
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, storm_counters);
+
+	stats_hdr->stats_counters_addrs.hi =
+		cpu_to_le32(U64_HI(cur_data_offset));
+	stats_hdr->stats_counters_addrs.lo =
+		cpu_to_le32(U64_LO(cur_data_offset));
+
+	/* prepare for the first stats ramrod (it will complete with all
+	 * counters equal to zero), so initialize the counters to something
+	 * different.
+	 */
+	memset(&bp->fw_stats_data->storm_counters, 0xff,
+	       sizeof(struct stats_counter));
+
+	/**** Port FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, port);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PORT;
+	/* For a port query the index is a "don't care" */
+	cur_query_entry->index = BP_PORT(bp);
+	/* For a port query the funcID is a "don't care" */
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** PF FW statistics data ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, pf);
+
+	cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX];
+
+	cur_query_entry->kind = STATS_TYPE_PF;
+	/* For a PF query the index is a "don't care" */
+	cur_query_entry->index = BP_PORT(bp);
+	cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+	cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
+	cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+
+	/**** Clients' queries ****/
+	cur_data_offset = bp->fw_stats_data_mapping +
+		offsetof(struct bnx2x_fw_stats_data, queue_stats);
+
+	for_each_eth_queue(bp, i) {
+		cur_query_entry =
+			&bp->fw_stats_req->
+					query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+
+		cur_query_entry->kind = STATS_TYPE_QUEUE;
+		cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
+		cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+		cur_query_entry->address.hi =
+			cpu_to_le32(U64_HI(cur_data_offset));
+		cur_query_entry->address.lo =
+			cpu_to_le32(U64_LO(cur_data_offset));
+
+		cur_data_offset += sizeof(struct per_queue_stats);
+	}
+}
+
+void bnx2x_stats_init(struct bnx2x *bp)
+{
+	int port = BP_PORT(bp);
+	int mb_idx = BP_FW_MB_IDX(bp);
+	int i;
+
+	bp->stats_pending = 0;
+	bp->executer_idx = 0;
+	bp->stats_counter = 0;
+
+	/* port and func stats for management */
+	if (!BP_NOMCP(bp)) {
+		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
+		bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
+
+	} else {
+		bp->port.port_stx = 0;
+		bp->func_stx = 0;
+	}
+	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
+	   bp->port.port_stx, bp->func_stx);
+
+	/* port stats */
+	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
+	bp->port.old_nig_stats.brb_discard =
+			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
+	bp->port.old_nig_stats.brb_truncate =
+			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
+	if (!CHIP_IS_E3(bp)) {
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
+		REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
+			    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
+	}
+
+	/* function stats */
+	for_each_queue(bp, i) {
+		struct bnx2x_fastpath *fp = &bp->fp[i];
+
+		memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
+		memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
+		memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
+		memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
+	}
+
+	/* Prepare statistics ramrod data */
+	bnx2x_prep_fw_stats_req(bp);
+
+	memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
+	memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+
+	bp->stats_state = STATS_STATE_DISABLED;
+
+	if (bp->port.pmf) {
+		if (bp->port.port_stx)
+			bnx2x_port_stats_base_init(bp);
+
+		if (bp->func_stx)
+			bnx2x_func_stats_base_init(bp);
+
+	} else if (bp->func_stx)
+		bnx2x_func_stats_base_update(bp);
+}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
new file mode 100644
index 0000000..5d8ce2f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -0,0 +1,381 @@
+/* bnx2x_stats.h: Broadcom Everest network driver.
+ *
+ * Copyright (c) 2007-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
+ * Based on code from Michael Chan's bnx2 driver
+ * UDP CSUM errata workaround by Arik Gendelman
+ * Slowpath and fastpath rework by Vladislav Zolotarov
+ * Statistics and Link management by Yitchak Gertner
+ *
+ */
+#ifndef BNX2X_STATS_H
+#define BNX2X_STATS_H
+
+#include <linux/types.h>
+
+struct nig_stats {
+	u32 brb_discard;
+	u32 brb_packet;
+	u32 brb_truncate;
+	u32 flow_ctrl_discard;
+	u32 flow_ctrl_octets;
+	u32 flow_ctrl_packet;
+	u32 mng_discard;
+	u32 mng_octet_inp;
+	u32 mng_octet_out;
+	u32 mng_packet_inp;
+	u32 mng_packet_out;
+	u32 pbf_octets;
+	u32 pbf_packet;
+	u32 safc_inp;
+	u32 egress_mac_pkt0_lo;
+	u32 egress_mac_pkt0_hi;
+	u32 egress_mac_pkt1_lo;
+	u32 egress_mac_pkt1_hi;
+};
+
+
+enum bnx2x_stats_event {
+	STATS_EVENT_PMF = 0,
+	STATS_EVENT_LINK_UP,
+	STATS_EVENT_UPDATE,
+	STATS_EVENT_STOP,
+	STATS_EVENT_MAX
+};
+
+enum bnx2x_stats_state {
+	STATS_STATE_DISABLED = 0,
+	STATS_STATE_ENABLED,
+	STATS_STATE_MAX
+};
+
+struct bnx2x_eth_stats {
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 error_bytes_received_hi;
+	u32 error_bytes_received_lo;
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 rx_stat_ifhcinbadoctets_hi;
+	u32 rx_stat_ifhcinbadoctets_lo;
+	u32 tx_stat_ifhcoutbadoctets_hi;
+	u32 tx_stat_ifhcoutbadoctets_lo;
+	u32 rx_stat_dot3statsfcserrors_hi;
+	u32 rx_stat_dot3statsfcserrors_lo;
+	u32 rx_stat_dot3statsalignmenterrors_hi;
+	u32 rx_stat_dot3statsalignmenterrors_lo;
+	u32 rx_stat_dot3statscarriersenseerrors_hi;
+	u32 rx_stat_dot3statscarriersenseerrors_lo;
+	u32 rx_stat_falsecarriererrors_hi;
+	u32 rx_stat_falsecarriererrors_lo;
+	u32 rx_stat_etherstatsundersizepkts_hi;
+	u32 rx_stat_etherstatsundersizepkts_lo;
+	u32 rx_stat_dot3statsframestoolong_hi;
+	u32 rx_stat_dot3statsframestoolong_lo;
+	u32 rx_stat_etherstatsfragments_hi;
+	u32 rx_stat_etherstatsfragments_lo;
+	u32 rx_stat_etherstatsjabbers_hi;
+	u32 rx_stat_etherstatsjabbers_lo;
+	u32 rx_stat_maccontrolframesreceived_hi;
+	u32 rx_stat_maccontrolframesreceived_lo;
+	u32 rx_stat_bmac_xpf_hi;
+	u32 rx_stat_bmac_xpf_lo;
+	u32 rx_stat_bmac_xcf_hi;
+	u32 rx_stat_bmac_xcf_lo;
+	u32 rx_stat_xoffstateentered_hi;
+	u32 rx_stat_xoffstateentered_lo;
+	u32 rx_stat_xonpauseframesreceived_hi;
+	u32 rx_stat_xonpauseframesreceived_lo;
+	u32 rx_stat_xoffpauseframesreceived_hi;
+	u32 rx_stat_xoffpauseframesreceived_lo;
+	u32 tx_stat_outxonsent_hi;
+	u32 tx_stat_outxonsent_lo;
+	u32 tx_stat_outxoffsent_hi;
+	u32 tx_stat_outxoffsent_lo;
+	u32 tx_stat_flowcontroldone_hi;
+	u32 tx_stat_flowcontroldone_lo;
+	u32 tx_stat_etherstatscollisions_hi;
+	u32 tx_stat_etherstatscollisions_lo;
+	u32 tx_stat_dot3statssinglecollisionframes_hi;
+	u32 tx_stat_dot3statssinglecollisionframes_lo;
+	u32 tx_stat_dot3statsmultiplecollisionframes_hi;
+	u32 tx_stat_dot3statsmultiplecollisionframes_lo;
+	u32 tx_stat_dot3statsdeferredtransmissions_hi;
+	u32 tx_stat_dot3statsdeferredtransmissions_lo;
+	u32 tx_stat_dot3statsexcessivecollisions_hi;
+	u32 tx_stat_dot3statsexcessivecollisions_lo;
+	u32 tx_stat_dot3statslatecollisions_hi;
+	u32 tx_stat_dot3statslatecollisions_lo;
+	u32 tx_stat_etherstatspkts64octets_hi;
+	u32 tx_stat_etherstatspkts64octets_lo;
+	u32 tx_stat_etherstatspkts65octetsto127octets_hi;
+	u32 tx_stat_etherstatspkts65octetsto127octets_lo;
+	u32 tx_stat_etherstatspkts128octetsto255octets_hi;
+	u32 tx_stat_etherstatspkts128octetsto255octets_lo;
+	u32 tx_stat_etherstatspkts256octetsto511octets_hi;
+	u32 tx_stat_etherstatspkts256octetsto511octets_lo;
+	u32 tx_stat_etherstatspkts512octetsto1023octets_hi;
+	u32 tx_stat_etherstatspkts512octetsto1023octets_lo;
+	u32 tx_stat_etherstatspkts1024octetsto1522octets_hi;
+	u32 tx_stat_etherstatspkts1024octetsto1522octets_lo;
+	u32 tx_stat_etherstatspktsover1522octets_hi;
+	u32 tx_stat_etherstatspktsover1522octets_lo;
+	u32 tx_stat_bmac_2047_hi;
+	u32 tx_stat_bmac_2047_lo;
+	u32 tx_stat_bmac_4095_hi;
+	u32 tx_stat_bmac_4095_lo;
+	u32 tx_stat_bmac_9216_hi;
+	u32 tx_stat_bmac_9216_lo;
+	u32 tx_stat_bmac_16383_hi;
+	u32 tx_stat_bmac_16383_lo;
+	u32 tx_stat_dot3statsinternalmactransmiterrors_hi;
+	u32 tx_stat_dot3statsinternalmactransmiterrors_lo;
+	u32 tx_stat_bmac_ufl_hi;
+	u32 tx_stat_bmac_ufl_lo;
+
+	u32 pause_frames_received_hi;
+	u32 pause_frames_received_lo;
+	u32 pause_frames_sent_hi;
+	u32 pause_frames_sent_lo;
+
+	u32 etherstatspkts1024octetsto1522octets_hi;
+	u32 etherstatspkts1024octetsto1522octets_lo;
+	u32 etherstatspktsover1522octets_hi;
+	u32 etherstatspktsover1522octets_lo;
+
+	u32 brb_drop_hi;
+	u32 brb_drop_lo;
+	u32 brb_truncate_hi;
+	u32 brb_truncate_lo;
+
+	u32 mac_filter_discard;
+	u32 mf_tag_discard;
+	u32 brb_truncate_discard;
+	u32 mac_discard;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 nig_timer_max;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+};
+
+
+struct bnx2x_eth_q_stats {
+	u32 total_unicast_bytes_received_hi;
+	u32 total_unicast_bytes_received_lo;
+	u32 total_broadcast_bytes_received_hi;
+	u32 total_broadcast_bytes_received_lo;
+	u32 total_multicast_bytes_received_hi;
+	u32 total_multicast_bytes_received_lo;
+	u32 total_bytes_received_hi;
+	u32 total_bytes_received_lo;
+	u32 total_unicast_bytes_transmitted_hi;
+	u32 total_unicast_bytes_transmitted_lo;
+	u32 total_broadcast_bytes_transmitted_hi;
+	u32 total_broadcast_bytes_transmitted_lo;
+	u32 total_multicast_bytes_transmitted_hi;
+	u32 total_multicast_bytes_transmitted_lo;
+	u32 total_bytes_transmitted_hi;
+	u32 total_bytes_transmitted_lo;
+	u32 total_unicast_packets_received_hi;
+	u32 total_unicast_packets_received_lo;
+	u32 total_multicast_packets_received_hi;
+	u32 total_multicast_packets_received_lo;
+	u32 total_broadcast_packets_received_hi;
+	u32 total_broadcast_packets_received_lo;
+	u32 total_unicast_packets_transmitted_hi;
+	u32 total_unicast_packets_transmitted_lo;
+	u32 total_multicast_packets_transmitted_hi;
+	u32 total_multicast_packets_transmitted_lo;
+	u32 total_broadcast_packets_transmitted_hi;
+	u32 total_broadcast_packets_transmitted_lo;
+	u32 valid_bytes_received_hi;
+	u32 valid_bytes_received_lo;
+
+	u32 etherstatsoverrsizepkts_hi;
+	u32 etherstatsoverrsizepkts_lo;
+	u32 no_buff_discard_hi;
+	u32 no_buff_discard_lo;
+
+	u32 driver_xoff;
+	u32 rx_err_discard_pkt;
+	u32 rx_skb_alloc_failed;
+	u32 hw_csum_err;
+
+	u32 total_packets_received_checksum_discarded_hi;
+	u32 total_packets_received_checksum_discarded_lo;
+	u32 total_packets_received_ttl0_discarded_hi;
+	u32 total_packets_received_ttl0_discarded_lo;
+	u32 total_transmitted_dropped_packets_error_hi;
+	u32 total_transmitted_dropped_packets_error_lo;
+
+	/* TPA */
+	u32 total_tpa_aggregations_hi;
+	u32 total_tpa_aggregations_lo;
+	u32 total_tpa_aggregated_frames_hi;
+	u32 total_tpa_aggregated_frames_lo;
+	u32 total_tpa_bytes_hi;
+	u32 total_tpa_bytes_lo;
+};
+
+/****************************************************************************
+* Macros
+****************************************************************************/
+
+/* sum[hi:lo] += add[hi:lo] */
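+/* e.g. s = 0x00000001_fffffffe, a = 0x00000000_00000003: s_lo wraps to
+ * 1 (< a_lo), the carry is detected and s becomes 0x00000002_00000001 */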
+#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
+	do { \
+		s_lo += a_lo; \
+		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
+	} while (0)
+
+/* difference = minuend - subtrahend */
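+/* (clamped: if the subtrahend is larger than the minuend, the result is
+ * zero rather than an underflowed value) */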
+#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
+	do { \
+		if (m_lo < s_lo) { \
+			/* underflow */ \
+			d_hi = m_hi - s_hi; \
+			if (d_hi > 0) { \
+				/* we can 'loan' 1 */ \
+				d_hi--; \
+				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
+			} else { \
+				/* m_hi <= s_hi */ \
+				d_hi = 0; \
+				d_lo = 0; \
+			} \
+		} else { \
+			/* m_lo >= s_lo */ \
+			if (m_hi < s_hi) { \
+				d_hi = 0; \
+				d_lo = 0; \
+			} else { \
+				/* m_hi >= s_hi */ \
+				d_hi = m_hi - s_hi; \
+				d_lo = m_lo - s_lo; \
+			} \
+		} \
+	} while (0)
+
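+/* mac_stx[0] keeps the latest raw MAC snapshot; mac_stx[1] accumulates
+ * the running totals by adding each snapshot-to-snapshot delta */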
+#define UPDATE_STAT64(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
+			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
+		pstats->mac_stx[0].t##_hi = new->s##_hi; \
+		pstats->mac_stx[0].t##_lo = new->s##_lo; \
+		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
+		       pstats->mac_stx[1].t##_lo, diff.lo); \
+	} while (0)
+
+#define UPDATE_STAT64_NIG(s, t) \
+	do { \
+		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
+			diff.lo, new->s##_lo, old->s##_lo); \
+		ADD_64(estats->t##_hi, diff.hi, \
+		       estats->t##_lo, diff.lo); \
+	} while (0)
+
+/* sum[hi:lo] += add */
+#define ADD_EXTEND_64(s_hi, s_lo, a) \
+	do { \
+		s_lo += a; \
+		s_hi += (s_lo < a) ? 1 : 0; \
+	} while (0)
+
+#define ADD_STAT64(diff, t) \
+	do { \
+		ADD_64(pstats->mac_stx[1].t##_hi, new->diff##_hi, \
+		       pstats->mac_stx[1].t##_lo, new->diff##_lo); \
+	} while (0)
+
+#define UPDATE_EXTEND_STAT(s) \
+	do { \
+		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
+			      pstats->mac_stx[1].s##_lo, \
+			      new->s); \
+	} while (0)
+
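+/* In the UPDATE_EXTEND_*STAT macros below, the 32-bit subtraction is
+ * wrap-safe: unsigned arithmetic yields the correct delta even if the
+ * firmware counter has wrapped once */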
+#define UPDATE_EXTEND_TSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
+		old_tclient->s = tclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		old_uclient->s = uclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+#define UPDATE_EXTEND_XSTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
+		old_xclient->s = xclient->s; \
+		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+/* minuend -= subtrahend */
+#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
+	do { \
+		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
+	} while (0)
+
+/* minuend[hi:lo] -= subtrahend */
+#define SUB_EXTEND_64(m_hi, m_lo, s) \
+	do { \
+		SUB_64(m_hi, 0, m_lo, s); \
+	} while (0)
+
+#define SUB_EXTEND_USTAT(s, t) \
+	do { \
+		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
+		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
+	} while (0)
+
+
+/* forward */
+struct bnx2x;
+
+void bnx2x_stats_init(struct bnx2x *bp);
+
+void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+
+#endif /* BNX2X_STATS_H */
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
new file mode 100644
index 0000000..7698161
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -0,0 +1,5487 @@
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
+ * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
+#include <linux/random.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/ip6_checksum.h>
+#include <scsi/iscsi_if.h>
+
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "bnx2x/bnx2x_reg.h"
+#include "bnx2x/bnx2x_fw_defs.h"
+#include "bnx2x/bnx2x_hsi.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
+#include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define DRV_MODULE_NAME		"cnic"
+
+static char version[] __devinitdata =
+	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
+	      "Chen (zongxi@broadcom.com");
+MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
+static LIST_HEAD(cnic_dev_list);
+static LIST_HEAD(cnic_udev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+/* helper function, assuming cnic_lock is held */
+static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
+{
+	return rcu_dereference_protected(cnic_ulp_tbl[type],
+					 lockdep_is_held(&cnic_lock));
+}
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_service_bnx2x(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static struct cnic_ops cnic_bnx2x_ops = {
+	.cnic_owner	= THIS_MODULE,
+	.cnic_handler	= cnic_service_bnx2x,
+	.cnic_ctl	= cnic_ctl,
+};
+
+static struct workqueue_struct *cnic_wq;
+
+static void cnic_shutdown_rings(struct cnic_dev *);
+static void cnic_init_rings(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_uio_dev *udev = uinfo->priv;
+	struct cnic_dev *dev;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (udev->uio_dev != -1)
+		return -EBUSY;
+
+	rtnl_lock();
+	dev = udev->dev;
+
+	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		rtnl_unlock();
+		return -ENODEV;
+	}
+
+	udev->uio_dev = iminor(inode);
+
+	cnic_shutdown_rings(dev);
+	cnic_init_rings(dev);
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+	struct cnic_uio_dev *udev = uinfo->priv;
+
+	udev->uio_dev = -1;
+	return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+	atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+	atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+	atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+	atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+	struct cnic_dev *cdev;
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(cdev, &cnic_dev_list, list) {
+		if (netdev == cdev->netdev) {
+			cnic_hold(cdev);
+			read_unlock(&cnic_dev_lock);
+			return cdev;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+	return NULL;
+}
+
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_dec(&ulp_ops->ref_count);
+}
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_CTX_WR_CMD;
+	io->cid_addr = cid_addr;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
+	io->offset = off;
+	io->dma_addr = addr;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_l2_ring *ring = &info.data.ring;
+
+	if (start)
+		info.cmd = DRV_CTL_START_L2_CMD;
+	else
+		info.cmd = DRV_CTL_STOP_L2_CMD;
+
+	ring->cid = cid;
+	ring->client_id = cl_id;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_WR_CMD;
+	io->offset = off;
+	io->data = val;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+	struct drv_ctl_io *io = &info.data.io;
+
+	info.cmd = DRV_CTL_IO_RD_CMD;
+	io->offset = off;
+	ethdev->drv_ctl(dev->netdev, &info);
+	return io->data;
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+	return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct drv_ctl_info info;
+
+	info.cmd = cmd;
+	info.data.credit.credit_count = count;
+	ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
+{
+	u32 i;
+
+	for (i = 0; i < cp->max_cid_space; i++) {
+		if (cp->ctx_tbl[i].cid == cid) {
+			*l5_cid = i;
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+			   struct cnic_sock *csk)
+{
+	struct iscsi_path path_req;
+	char *buf = NULL;
+	u16 len = 0;
+	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+	struct cnic_ulp_ops *ulp_ops;
+	struct cnic_uio_dev *udev = cp->udev;
+	int rc = 0, retry = 0;
+
+	if (!udev || udev->uio_dev == -1)
+		return -ENODEV;
+
+	if (csk) {
+		len = sizeof(path_req);
+		buf = (char *) &path_req;
+		memset(&path_req, 0, len);
+
+		msg_type = ISCSI_KEVENT_PATH_REQ;
+		path_req.handle = (u64) csk->l5_cid;
+		if (test_bit(SK_F_IPV6, &csk->flags)) {
+			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+			       sizeof(struct in6_addr));
+			path_req.ip_addr_len = 16;
+		} else {
+			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+			       sizeof(struct in_addr));
+			path_req.ip_addr_len = 4;
+		}
+		path_req.vlan_id = csk->vlan_id;
+		path_req.pmtu = csk->mtu;
+	}
+
+	while (retry < 3) {
+		rc = 0;
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+		if (ulp_ops)
+			rc = ulp_ops->iscsi_nl_send_msg(
+				cp->ulp_handle[CNIC_ULP_ISCSI],
+				msg_type, buf, len);
+		rcu_read_unlock();
+		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
+			break;
+
+		msleep(100);
+		retry++;
+	}
+	return rc;
+}
+
+static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+				  char *buf, u16 len)
+{
+	int rc = -EINVAL;
+
+	switch (msg_type) {
+	case ISCSI_UEVENT_PATH_UPDATE: {
+		struct cnic_local *cp;
+		u32 l5_cid;
+		struct cnic_sock *csk;
+		struct iscsi_path *path_resp;
+
+		if (len < sizeof(*path_resp))
+			break;
+
+		path_resp = (struct iscsi_path *) buf;
+		cp = dev->cnic_priv;
+		l5_cid = (u32) path_resp->handle;
+		if (l5_cid >= MAX_CM_SK_TBL_SZ)
+			break;
+
+		rcu_read_lock();
+		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
+			rc = -ENODEV;
+			rcu_read_unlock();
+			break;
+		}
+		csk = &cp->csk_tbl[l5_cid];
+		csk_hold(csk);
+		if (cnic_in_use(csk) &&
+		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
+
+			memcpy(csk->ha, path_resp->mac_addr, 6);
+			if (test_bit(SK_F_IPV6, &csk->flags))
+				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+				       sizeof(struct in6_addr));
+			else
+				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+				       sizeof(struct in_addr));
+
+			if (is_valid_ether_addr(csk->ha)) {
+				cnic_cm_set_pg(csk);
+			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
+				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+
+				cnic_cm_upcall(cp, csk,
+					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+				clear_bit(SK_F_CONNECT_START, &csk->flags);
+			}
+		}
+		csk_put(csk);
+		rcu_read_unlock();
+		rc = 0;
+	}
+	}
+
+	return rc;
+}
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		return 0;
+
+	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+			msleep(1);
+
+		return 1;
+	}
+	return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	smp_mb__after_clear_bit();
+
+	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+		msleep(1);
+
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+		return 1;
+	}
+
+	return 0;
+}
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+	struct cnic_dev *dev;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl_prot(ulp_type)) {
+		pr_err("%s: Type %d has already been registered\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+	}
+	read_unlock(&cnic_dev_lock);
+
+	atomic_set(&ulp_ops->ref_count, 0);
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+	mutex_unlock(&cnic_lock);
+
+	/* Prevent race conditions with netdev_event */
+	rtnl_lock();
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_init(dev);
+	}
+	rtnl_unlock();
+
+	return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+	struct cnic_dev *dev;
+	struct cnic_ulp_ops *ulp_ops;
+	int i = 0;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	if (!ulp_ops) {
+		pr_err("%s: Type %d has not been registered\n",
+		       __func__, ulp_type);
+		goto out_unlock;
+	}
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(dev, &cnic_dev_list, list) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+			pr_err("%s: Type %d still has devices registered\n",
+			       __func__, ulp_type);
+			read_unlock(&cnic_dev_lock);
+			goto out_unlock;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+
+	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+
+	mutex_unlock(&cnic_lock);
+	synchronize_rcu();
+	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+		msleep(100);
+		i++;
+	}
+
+	if (atomic_read(&ulp_ops->ref_count) != 0)
+		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
+	return 0;
+
+out_unlock:
+	mutex_unlock(&cnic_lock);
+	return -EINVAL;
+}
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+				void *ulp_ctx)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
+		pr_err("%s: Driver with type %d has not been registered\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EAGAIN;
+	}
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		pr_err("%s: Type %d has already been registered to this device\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EBUSY;
+	}
+
+	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+	cp->ulp_handle[ulp_type] = ulp_ctx;
+	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+	cnic_hold(dev);
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+	mutex_unlock(&cnic_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(cnic_register_driver);
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
+
+	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
+		pr_err("%s: Bad type %d\n", __func__, ulp_type);
+		return -EINVAL;
+	}
+	mutex_lock(&cnic_lock);
+	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+		rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+		cnic_put(dev);
+	} else {
+		pr_err("%s: device not registered to this ulp type %d\n",
+		       __func__, ulp_type);
+		mutex_unlock(&cnic_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&cnic_lock);
+
+	if (ulp_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
+	synchronize_rcu();
+
+	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+	       i < 20) {
+		msleep(100);
+		i++;
+	}
+	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
+
+	return 0;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
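+/* The id table is a simple bitmap allocator handing out ids in
+ * [start_id, start_id + size); one bit per id.
+ */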
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
+			    u32 next)
+{
+	id_tbl->start = start_id;
+	id_tbl->max = size;
+	id_tbl->next = next;
+	spin_lock_init(&id_tbl->lock);
+	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+	if (!id_tbl->table)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+	kfree(id_tbl->table);
+	id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	int ret = -1;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return ret;
+
+	spin_lock(&id_tbl->lock);
+	if (!test_bit(id, id_tbl->table)) {
+		set_bit(id, id_tbl->table);
+		ret = 0;
+	}
+	spin_unlock(&id_tbl->lock);
+	return ret;
+}
+
+/* Returns -1 if not successful */
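+/* Circular search: scan [next, max) first, then wrap to [0, next) */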
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+	u32 id;
+
+	spin_lock(&id_tbl->lock);
+	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+	if (id >= id_tbl->max) {
+		id = -1;
+		if (id_tbl->next != 0) {
+			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+			if (id >= id_tbl->next)
+				id = -1;
+		}
+	}
+
+	if (id < id_tbl->max) {
+		set_bit(id, id_tbl->table);
+		id_tbl->next = (id + 1) & (id_tbl->max - 1);
+		id += id_tbl->start;
+	}
+
+	spin_unlock(&id_tbl->lock);
+
+	return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+	if (id == -1)
+		return;
+
+	id -= id_tbl->start;
+	if (id >= id_tbl->max)
+		return;
+
+	clear_bit(id, id_tbl->table);
+}
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+
+	if (!dma->pg_arr)
+		return;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		if (dma->pg_arr[i]) {
+			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
+					  dma->pg_arr[i], dma->pg_map_arr[i]);
+			dma->pg_arr[i] = NULL;
+		}
+	}
+	if (dma->pgtbl) {
+		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+				  dma->pgtbl, dma->pgtbl_map);
+		dma->pgtbl = NULL;
+	}
+	kfree(dma->pg_arr);
+	dma->pg_arr = NULL;
+	dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each 64-bit entry is stored most-significant 32-bit
+		 * word first (big-endian word order). */
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
+		page_table++;
+	}
+}
+
+static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+	int i;
+	__le32 *page_table = (__le32 *) dma->pgtbl;
+
+	for (i = 0; i < dma->num_pages; i++) {
+		/* Each 64-bit entry is stored least-significant 32-bit
+		 * word first (little-endian word order). */
+		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
+		page_table++;
+		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
+		page_table++;
+	}
+}
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+			  int pages, int use_pg_tbl)
+{
+	int i, size;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+	if (dma->pg_arr == NULL)
+		return -ENOMEM;
+
+	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+	dma->num_pages = pages;
+
+	for (i = 0; i < pages; i++) {
+		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
+						    BCM_PAGE_SIZE,
+						    &dma->pg_map_arr[i],
+						    GFP_ATOMIC);
+		if (dma->pg_arr[i] == NULL)
+			goto error;
+	}
+	if (!use_pg_tbl)
+		return 0;
+
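+	/* one 8-byte entry (hi/lo 32-bit words) per page, rounded up to a
+	 * full BCM_PAGE_SIZE
+	 */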
+	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
+			  ~(BCM_PAGE_SIZE - 1);
+	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
+					&dma->pgtbl_map, GFP_ATOMIC);
+	if (dma->pgtbl == NULL)
+		goto error;
+
+	cp->setup_pgtbl(dev, dma);
+
+	return 0;
+
+error:
+	cnic_free_dma(dev, dma);
+	return -ENOMEM;
+}
+
+static void cnic_free_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		if (cp->ctx_arr[i].ctx) {
+			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+					  cp->ctx_arr[i].ctx,
+					  cp->ctx_arr[i].mapping);
+			cp->ctx_arr[i].ctx = NULL;
+		}
+	}
+}
+
+static void __cnic_free_uio(struct cnic_uio_dev *udev)
+{
+	uio_unregister_device(&udev->cnic_uinfo);
+
+	if (udev->l2_buf) {
+		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
+				  udev->l2_buf, udev->l2_buf_map);
+		udev->l2_buf = NULL;
+	}
+
+	if (udev->l2_ring) {
+		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+				  udev->l2_ring, udev->l2_ring_map);
+		udev->l2_ring = NULL;
+	}
+
+	pci_dev_put(udev->pdev);
+	kfree(udev);
+}
+
+static void cnic_free_uio(struct cnic_uio_dev *udev)
+{
+	if (!udev)
+		return;
+
+	write_lock(&cnic_dev_lock);
+	list_del_init(&udev->list);
+	write_unlock(&cnic_dev_lock);
+	__cnic_free_uio(udev);
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+
+	if (udev) {
+		udev->dev = NULL;
+		cp->udev = NULL;
+	}
+
+	cnic_free_context(dev);
+	kfree(cp->ctx_arr);
+	cp->ctx_arr = NULL;
+	cp->ctx_blks = 0;
+
+	cnic_free_dma(dev, &cp->gbl_buf_info);
+	cnic_free_dma(dev, &cp->kwq_info);
+	cnic_free_dma(dev, &cp->kwq_16_data_info);
+	cnic_free_dma(dev, &cp->kcq2.dma);
+	cnic_free_dma(dev, &cp->kcq1.dma);
+	kfree(cp->iscsi_tbl);
+	cp->iscsi_tbl = NULL;
+	kfree(cp->ctx_tbl);
+	cp->ctx_tbl = NULL;
+
+	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
+	cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		int i, k, arr_size;
+
+		cp->ctx_blk_size = BCM_PAGE_SIZE;
+		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+			   sizeof(struct cnic_ctx);
+		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
+		if (cp->ctx_arr == NULL)
+			return -ENOMEM;
+
+		k = 0;
+		for (i = 0; i < 2; i++) {
+			u32 j, reg, off, lo, hi;
+
+			if (i == 0)
+				off = BNX2_PG_CTX_MAP;
+			else
+				off = BNX2_ISCSI_CTX_MAP;
+
+			reg = cnic_reg_rd_ind(dev, off);
+			lo = reg >> 16;
+			hi = reg & 0xffff;
+			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+				cp->ctx_arr[k].cid = j;
+		}
+
+		cp->ctx_blks = k;
+		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+			cp->ctx_blks = 0;
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < cp->ctx_blks; i++) {
+			cp->ctx_arr[i].ctx =
+				dma_alloc_coherent(&dev->pcidev->dev,
+						   BCM_PAGE_SIZE,
+						   &cp->ctx_arr[i].mapping,
+						   GFP_KERNEL);
+			if (cp->ctx_arr[i].ctx == NULL)
+				return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+	return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+	return idx;
+}
+
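+/* On bnx2x, the last KCQE slot of each page holds the link to the next
+ * page (see cnic_alloc_kcq), so the ring index must skip over it.
+ */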
+static u16 cnic_bnx2x_next_idx(u16 idx)
+{
+	idx++;
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+
+	return idx;
+}
+
+static u16 cnic_bnx2x_hw_idx(u16 idx)
+{
+	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
+		idx++;
+	return idx;
+}
+
+static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
+			  bool use_pg_tbl)
+{
+	int err, i;
+	struct kcqe **kcq;
+
+	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_pg_tbl);
+	if (err)
+		return err;
+
+	kcq = (struct kcqe **) info->dma.pg_arr;
+	info->kcq = kcq;
+
+	info->next_idx = cnic_bnx2_next_idx;
+	info->hw_idx = cnic_bnx2_hw_idx;
+	if (use_pg_tbl)
+		return 0;
+
+	info->next_idx = cnic_bnx2x_next_idx;
+	info->hw_idx = cnic_bnx2x_hw_idx;
+
+	for (i = 0; i < KCQ_PAGE_CNT; i++) {
+		struct bnx2x_bd_chain_next *next =
+			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
+		int j = i + 1;
+
+		if (j >= KCQ_PAGE_CNT)
+			j = 0;
+		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
+		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
+	}
+	return 0;
+}
+
+static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev;
+
+	read_lock(&cnic_dev_lock);
+	list_for_each_entry(udev, &cnic_udev_list, list) {
+		if (udev->pdev == dev->pcidev) {
+			udev->dev = dev;
+			cp->udev = udev;
+			read_unlock(&cnic_dev_lock);
+			return 0;
+		}
+	}
+	read_unlock(&cnic_dev_lock);
+
+	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
+	if (!udev)
+		return -ENOMEM;
+
+	udev->uio_dev = -1;
+
+	udev->dev = dev;
+	udev->pdev = dev->pcidev;
+	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
+	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
+					   &udev->l2_ring_map,
+					   GFP_KERNEL | __GFP_COMP);
+	if (!udev->l2_ring)
+		goto err_udev;
+
+	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
+					  &udev->l2_buf_map,
+					  GFP_KERNEL | __GFP_COMP);
+	if (!udev->l2_buf)
+		goto err_dma;
+
+	write_lock(&cnic_dev_lock);
+	list_add(&udev->list, &cnic_udev_list);
+	write_unlock(&cnic_dev_lock);
+
+	pci_dev_get(udev->pdev);
+
+	cp->udev = udev;
+
+	return 0;
+ err_dma:
+	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
+			  udev->l2_ring, udev->l2_ring_map);
+ err_udev:
+	kfree(udev);
+	return -ENOMEM;
+}
+
+static int cnic_init_uio(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+	struct uio_info *uinfo;
+	int ret = 0;
+
+	if (!udev)
+		return -ENOMEM;
+
+	uinfo = &udev->cnic_uinfo;
+
+	uinfo->mem[0].addr = dev->netdev->base_addr;
+	uinfo->mem[0].internal_addr = dev->regview;
+	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
+	uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
+					PAGE_MASK;
+		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+		else
+			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+
+		uinfo->name = "bnx2_cnic";
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
+			PAGE_MASK;
+		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
+
+		uinfo->name = "bnx2x_cnic";
+	}
+
+	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
+	uinfo->mem[2].size = udev->l2_ring_size;
+	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
+	uinfo->mem[3].size = udev->l2_buf_size;
+	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+	uinfo->version = CNIC_MODULE_VERSION;
+	uinfo->irq = UIO_IRQ_CUSTOM;
+
+	uinfo->open = cnic_uio_open;
+	uinfo->release = cnic_uio_close;
+
+	if (udev->uio_dev == -1) {
+		if (!uinfo->priv) {
+			uinfo->priv = udev;
+
+			ret = uio_register_device(&udev->pdev->dev, uinfo);
+		}
+	} else {
+		cnic_init_rings(dev);
+	}
+
+	return ret;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+
+	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+	if (ret)
+		goto error;
+	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_context(dev);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_uio_rings(dev, 2);
+	if (ret)
+		goto error;
+
+	ret = cnic_init_uio(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return ret;
+}
+
+static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ctx_blk_size = cp->ethdev->ctx_blk_size;
+	int total_mem, blks, i;
+
+	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
+	blks = total_mem / ctx_blk_size;
+	if (total_mem % ctx_blk_size)
+		blks++;
+
+	if (blks > cp->ethdev->ctx_tbl_len)
+		return -ENOMEM;
+
+	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
+	if (cp->ctx_arr == NULL)
+		return -ENOMEM;
+
+	cp->ctx_blks = blks;
+	cp->ctx_blk_size = ctx_blk_size;
+	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
+		cp->ctx_align = 0;
+	else
+		cp->ctx_align = ctx_blk_size;
+
+	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
+
+	for (i = 0; i < blks; i++) {
+		cp->ctx_arr[i].ctx =
+			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
+					   &cp->ctx_arr[i].mapping,
+					   GFP_KERNEL);
+		if (cp->ctx_arr[i].ctx == NULL)
+			return -ENOMEM;
+
+		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
+			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
+				cnic_free_context(dev);
+				cp->ctx_blk_size += cp->ctx_align;
+				i = -1;
+				continue;
+			}
+		}
+	}
+	return 0;
+}
+
+static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 start_cid = ethdev->starting_cid;
+	int i, j, n, ret, pages;
+	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
+
+	cp->iro_arr = ethdev->iro_arr;
+
+	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
+	cp->iscsi_start_cid = start_cid;
+	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
+		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
+		if (!cp->fcoe_init_cid)
+			cp->fcoe_init_cid = 0x10;
+	}
+
+	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
+				GFP_KERNEL);
+	if (!cp->iscsi_tbl)
+		goto error;
+
+	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
+				cp->max_cid_space, GFP_KERNEL);
+	if (!cp->ctx_tbl)
+		goto error;
+
+	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
+		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
+	}
+
+	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
+		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
+
+	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+		PAGE_SIZE;
+
+	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
+	if (ret)
+		goto error;
+
+	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
+		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
+
+		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
+		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
+						   off;
+
+		if ((i % n) == (n - 1))
+			j++;
+	}
+
+	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
+	if (ret)
+		goto error;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
+		if (ret)
+			goto error;
+	}
+
+	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
+	if (ret)
+		goto error;
+
+	ret = cnic_alloc_bnx2x_context(dev);
+	if (ret)
+		goto error;
+
+	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
+
+	cp->l2_rx_ring_size = 15;
+
+	ret = cnic_alloc_uio_rings(dev, 4);
+	if (ret)
+		goto error;
+
+	ret = cnic_init_uio(dev);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_resc(dev);
+	return -ENOMEM;
+}
+
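+/* Number of free KWQ slots: the producer-consumer distance is taken
+ * modulo the ring size via the max_kwq_idx mask.
+ */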
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+	return cp->max_kwq_idx -
+		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				  u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct kwqe *prod_qe;
+	u16 prod, sw_prod, i;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2 is down */
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
+	if (num_wqes > cnic_kwq_avail(cp) &&
+	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
+		spin_unlock_bh(&cp->cnic_ulp_lock);
+		return -EAGAIN;
+	}
+
+	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
+
+	prod = cp->kwq_prod_idx;
+	sw_prod = prod & MAX_KWQ_IDX;
+	for (i = 0; i < num_wqes; i++) {
+		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+		prod++;
+		sw_prod = prod & MAX_KWQ_IDX;
+	}
+	cp->kwq_prod_idx = prod;
+
+	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+	return 0;
+}
+
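+/* Each context owns a small per-connection KWQE data buffer, carved out
+ * in cnic_alloc_bnx2x_resc; return its virtual address and report its
+ * DMA address through l5_data.
+ */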
+static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
+				   union l5cm_specific_data *l5_data)
+{
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	dma_addr_t map;
+
+	map = ctx->kwqe_data_mapping;
+	l5_data->phy_address.lo = (u64) map & 0xffffffff;
+	l5_data->phy_address.hi = (u64) map >> 32;
+	return ctx->kwqe_data;
+}
+
+static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
+				u32 type, union l5cm_specific_data *l5_data)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l5cm_spe kwqe;
+	struct kwqe_16 *kwq[1];
+	u16 type_16;
+	int ret;
+
+	kwqe.hdr.conn_and_cmd_data =
+		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
+			     BNX2X_HW_CID(cp, cid)));
+
+	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
+	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
+		   SPE_HDR_FUNCTION_ID;
+
+	kwqe.hdr.type = cpu_to_le16(type_16);
+	kwqe.hdr.reserved1 = 0;
+	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
+	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
+
+	kwq[0] = (struct kwqe_16 *) &kwqe;
+
+	spin_lock_bh(&cp->cnic_ulp_lock);
+	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
+	spin_unlock_bh(&cp->cnic_ulp_lock);
+
+	if (ret == 1)
+		return 0;
+
+	return -EBUSY;
+}
+
+static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
+				   struct kcqe *cqes[], u32 num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_ulp_ops *ulp_ops;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (likely(ulp_ops)) {
+		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+					  cqes, num_cqes);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
+	int hq_bds, pages;
+	u32 pfid = cp->pfid;
+
+	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
+	cp->num_ccells = req1->num_ccells_per_conn;
+	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
+			      cp->num_iscsi_tasks;
+	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
+			BNX2X_ISCSI_R2TQE_SIZE;
+	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
+	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+	cp->num_cqs = req1->num_cqs;
+
+	if (!dev->max_iscsi_conn)
+		return 0;
+
+	/* init Tstorm RAM */
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
+		  req1->rq_num_wqes);
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+
+	/* init Ustorm RAM */
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
+		  req1->rq_buffer_size);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
+		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
+		  req1->rq_num_wqes);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
+		  req1->cq_num_wqes);
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
+		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+	/* init Xstorm RAM */
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
+		  hq_bds);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
+		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
+
+	/* init Cstorm RAM */
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
+		  PAGE_SIZE);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
+		  req1->num_tasks_per_conn);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
+		  req1->cq_num_wqes);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
+		  hq_bds);
+
+	return 0;
+}
+
+static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 pfid = cp->pfid;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	if (!dev->max_iscsi_conn) {
+		kcqe.completion_status =
+			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
+		goto done;
+	}
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
+		req2->error_bit_map[1]);
+
+	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
+		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
+		req2->error_bit_map[1]);
+
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
+
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+
+done:
+	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+	return 0;
+}
+
+static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
+		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+		cnic_free_dma(dev, &iscsi->hq_info);
+		cnic_free_dma(dev, &iscsi->r2tq_info);
+		cnic_free_dma(dev, &iscsi->task_array_info);
+		cnic_free_id(&cp->cid_tbl, ctx->cid);
+	} else {
+		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
+	}
+
+	ctx->cid = 0;
+}
+
+static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
+{
+	u32 cid;
+	int ret, pages;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+
+	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
+		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
+		if (cid == -1) {
+			ret = -ENOMEM;
+			goto error;
+		}
+		ctx->cid = cid;
+		return 0;
+	}
+
+	cid = cnic_alloc_new_id(&cp->cid_tbl);
+	if (cid == -1) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	ctx->cid = cid;
+	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+
+	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
+	if (ret)
+		goto error;
+
+	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
+	if (ret)
+		goto error;
+
+	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
+	if (ret)
+		goto error;
+
+	return 0;
+
+error:
+	cnic_free_bnx2x_conn_resc(dev, l5_cid);
+	return ret;
+}
+
+static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
+				struct regpair *ctx_addr)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
+	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
+	unsigned long align_off = 0;
+	dma_addr_t ctx_map;
+	void *ctx;
+
+	if (cp->ctx_align) {
+		unsigned long mask = cp->ctx_align - 1;
+
+		if (cp->ctx_arr[blk].mapping & mask)
+			align_off = cp->ctx_align -
+				    (cp->ctx_arr[blk].mapping & mask);
+	}
+	ctx_map = cp->ctx_arr[blk].mapping + align_off +
+		(off * BNX2X_CONTEXT_MEM_SIZE);
+	ctx = cp->ctx_arr[blk].ctx + align_off +
+	      (off * BNX2X_CONTEXT_MEM_SIZE);
+	if (init)
+		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
+
+	ctx_addr->lo = ctx_map & 0xffffffff;
+	ctx_addr->hi = (u64) ctx_map >> 32;
+	return ctx;
+}
+
+static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_offload1 *req1 =
+			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
+	struct iscsi_kwqe_conn_offload2 *req2 =
+			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
+	struct iscsi_kwqe_conn_offload3 *req3;
+	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
+	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
+	u32 cid = ctx->cid;
+	u32 hw_cid = BNX2X_HW_CID(cp, cid);
+	struct iscsi_context *ictx;
+	struct regpair context_addr;
+	int i, j, n = 2, n_max;
+	u8 port = CNIC_PORT(cp);
+
+	ctx->ctx_flags = 0;
+	if (!req2->num_additional_wqes)
+		return -EINVAL;
+
+	n_max = req2->num_additional_wqes + 2;
+
+	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
+	if (ictx == NULL)
+		return -ENOMEM;
+
+	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+
+	ictx->xstorm_ag_context.hq_prod = 1;
+
+	ictx->xstorm_st_context.iscsi.first_burst_length =
+		ISCSI_DEF_FIRST_BURST_LEN;
+	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
+		ISCSI_DEF_MAX_RECV_SEG_LEN;
+	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
+		req1->sq_page_table_addr_lo;
+	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
+		req1->sq_page_table_addr_hi;
+	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
+	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
+	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
+		iscsi->hq_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
+		(u64) iscsi->hq_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
+		iscsi->hq_info.pgtbl[0];
+	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
+		iscsi->hq_info.pgtbl[1];
+	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
+		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
+		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
+		iscsi->r2tq_info.pgtbl[0];
+	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
+		iscsi->r2tq_info.pgtbl[1];
+	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
+		BNX2X_ISCSI_PBL_NOT_CACHED;
+	ictx->xstorm_st_context.iscsi.flags.flags |=
+		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
+	ictx->xstorm_st_context.iscsi.flags.flags |=
+		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
+	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
+		ETH_P_8021Q;
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
+	    cp->port_mode == CHIP_2_PORT_MODE)
+		port = 0;
+	ictx->xstorm_st_context.common.flags =
+		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
+	ictx->xstorm_st_context.common.flags |=
+		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
+
+	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
+	/* TSTORM requires the base address of RQ DB & not PTE */
+	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
+		req2->rq_page_table_addr_lo & PAGE_MASK;
+	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
+		req2->rq_page_table_addr_hi;
+	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
+	ictx->tstorm_st_context.tcp.flags2 |=
+		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
+	ictx->tstorm_st_context.tcp.ooo_support_mode =
+		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
+
+	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
+
+	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
+		req2->rq_page_table_addr_lo;
+	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
+		req2->rq_page_table_addr_hi;
+	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
+	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
+	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
+		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
+	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
+		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
+	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
+		iscsi->r2tq_info.pgtbl[0];
+	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
+		iscsi->r2tq_info.pgtbl[1];
+	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
+		req1->cq_page_table_addr_lo;
+	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
+		req1->cq_page_table_addr_hi;
+	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
+	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
+	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
+	ictx->ustorm_st_context.task_pbe_cache_index =
+		BNX2X_ISCSI_PBL_NOT_CACHED;
+	ictx->ustorm_st_context.task_pdu_cache_index =
+		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
+
+	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
+		if (j == 3) {
+			if (n >= n_max)
+				break;
+			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
+			j = 0;
+		}
+		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
+		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
+			req3->qp_first_pte[j].hi;
+		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
+			req3->qp_first_pte[j].lo;
+	}
+
+	ictx->ustorm_st_context.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->ustorm_st_context.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	ictx->ustorm_st_context.tce_phy_addr.lo =
+		iscsi->task_array_info.pgtbl[0];
+	ictx->ustorm_st_context.tce_phy_addr.hi =
+		iscsi->task_array_info.pgtbl[1];
+	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
+	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
+	ictx->ustorm_st_context.negotiated_rx_and_flags |=
+		ISCSI_DEF_MAX_BURST_LEN;
+	ictx->ustorm_st_context.negotiated_rx |=
+		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
+		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
+
+	ictx->cstorm_st_context.hq_pbl_base.lo =
+		iscsi->hq_info.pgtbl_map & 0xffffffff;
+	ictx->cstorm_st_context.hq_pbl_base.hi =
+		(u64) iscsi->hq_info.pgtbl_map >> 32;
+	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
+	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
+	ictx->cstorm_st_context.task_pbl_base.lo =
+		iscsi->task_array_info.pgtbl_map & 0xffffffff;
+	ictx->cstorm_st_context.task_pbl_base.hi =
+		(u64) iscsi->task_array_info.pgtbl_map >> 32;
+	/* CSTORM and USTORM initialization is different, CSTORM requires
+	 * CQ DB base & not PTE addr */
+	ictx->cstorm_st_context.cq_db_base.lo =
+		req1->cq_page_table_addr_lo & PAGE_MASK;
+	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
+	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
+	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
+	for (i = 0; i < cp->num_cqs; i++) {
+		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
+			ISCSI_INITIAL_SN;
+		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
+			ISCSI_INITIAL_SN;
+	}
+
+	ictx->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+				       ISCSI_CONNECTION_TYPE);
+	ictx->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+				       ISCSI_CONNECTION_TYPE);
+	return 0;
+}
+
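+/* First-stage iSCSI offload: validate the KWQE chain, allocate the
+ * per-connection resources, program the context, and post an offload
+ * completion KCQE back to the iSCSI ULP.
+ */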
+static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num, int *work)
+{
+	struct iscsi_kwqe_conn_offload1 *req1;
+	struct iscsi_kwqe_conn_offload2 *req2;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+	u32 l5_cid;
+	int ret = 0;
+
+	if (num < 2) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
+	if ((num - 2) < req2->num_additional_wqes) {
+		*work = num;
+		return -EINVAL;
+	}
+	*work = 2 + req2->num_additional_wqes;
+
+	l5_cid = req1->iscsi_conn_id;
+	if (l5_cid >= MAX_ISCSI_TBL_SZ)
+		return -EINVAL;
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.iscsi_conn_id = l5_cid;
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
+		kcqe.completion_status =
+			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
+		goto done;
+	}
+
+	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
+		atomic_dec(&cp->iscsi_conn);
+		goto done;
+	}
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		atomic_dec(&cp->iscsi_conn);
+		ret = 0;
+		goto done;
+	}
+	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
+	if (ret < 0) {
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+		atomic_dec(&cp->iscsi_conn);
+		goto done;
+	}
+
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
+
+done:
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+	return ret;
+}
+
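+/* Copy an iSCSI connection-update KWQE into ramrod data and submit it
+ * with cnic_submit_kwqe_16().
+ */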
+static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_update *req =
+		(struct iscsi_kwqe_conn_update *) kwqe;
+	void *data;
+	union l5cm_specific_data l5_data;
+	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
+	int ret;
+
+	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
+		return -EINVAL;
+
+	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!data)
+		return -ENOMEM;
+
+	memcpy(data, kwqe, sizeof(struct kwqe));
+
+	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
+			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
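+/* Issue a CFC delete ramrod for the connection and wait for its
+ * completion event; returns -EBUSY if the firmware reported a CID
+ * error.
+ */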
+static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 hw_cid;
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+	memset(&l5_data, 0, sizeof(l5_data));
+	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
+
+	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
+
+	if (ret == 0) {
+		wait_event(ctx->waitq, ctx->wait_cond);
+		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
+			return -EBUSY;
+	}
+
+	return ret;
+}
+
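+/* Destroy an offloaded iSCSI connection and post a DESTROY completion
+ * KCQE.  If less than 2 seconds have passed since ctx->timestamp, the
+ * CFC delete is deferred to the delayed work queue instead of being
+ * issued here.
+ */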
+static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct iscsi_kwqe_conn_destroy *req =
+		(struct iscsi_kwqe_conn_destroy *) kwqe;
+	u32 l5_cid = req->reserved0;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	int ret = 0;
+	struct iscsi_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+		goto skip_cfc_delete;
+
+	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
+
+		if (delta > (2 * HZ))
+			delta = 0;
+
+		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
+		goto destroy_reply;
+	}
+
+	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
+
+skip_cfc_delete:
+	cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	if (!ret) {
+		atomic_dec(&cp->iscsi_conn);
+		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+	}
+
+destroy_reply:
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.iscsi_conn_id = l5_cid;
+	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
+	kcqe.iscsi_conn_context_id = req->context_id;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
+
+	return ret;
+}
+
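+/* Fill the XSTORM and TSTORM connection buffers for a TCP connect
+ * request, including the precomputed pseudo-header checksum and the
+ * optional keepalive parameters.
+ */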
+static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
+				      struct l4_kwq_connect_req1 *kwqe1,
+				      struct l4_kwq_connect_req3 *kwqe3,
+				      struct l5cm_active_conn_buffer *conn_buf)
+{
+	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
+	struct l5cm_xstorm_conn_buffer *xstorm_buf =
+		&conn_buf->xstorm_conn_buffer;
+	struct l5cm_tstorm_conn_buffer *tstorm_buf =
+		&conn_buf->tstorm_conn_buffer;
+	struct regpair context_addr;
+	u32 cid = BNX2X_SW_CID(kwqe1->cid);
+	struct in6_addr src_ip, dst_ip;
+	int i;
+	u32 *addrp;
+
+	addrp = (u32 *) &conn_addr->local_ip_addr;
+	for (i = 0; i < 4; i++, addrp++)
+		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+	addrp = (u32 *) &conn_addr->remote_ip_addr;
+	for (i = 0; i < 4; i++, addrp++)
+		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
+
+	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
+
+	xstorm_buf->context_addr.hi = context_addr.hi;
+	xstorm_buf->context_addr.lo = context_addr.lo;
+	xstorm_buf->mss = 0xffff;
+	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
+	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
+		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
+	xstorm_buf->pseudo_header_checksum =
+		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
+
+	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
+		tstorm_buf->params |=
+			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
+	if (kwqe3->ka_timeout) {
+		tstorm_buf->ka_enable = 1;
+		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
+		tstorm_buf->ka_interval = kwqe3->ka_interval;
+		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
+	}
+	tstorm_buf->max_rt_time = 0xffffffff;
+}
+
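+/* Program the local MAC address into XSTORM internal memory in
+ * forward byte order and into TSTORM in reversed byte order.
+ */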
+static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 pfid = cp->pfid;
+	u8 *mac = dev->mac_addr;
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
+
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 mac[4]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 mac[2]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
+	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
+		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
+		 mac[0]);
+}
+
+static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
+	u16 tstorm_flags = 0;
+
+	if (tcp_ts) {
+		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
+	}
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
+
+	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
+		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
+}
+
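+/* Handle an L4 connect request: two KWQEs for IPv4, three for IPv6.
+ * Builds the connection address and storm buffers and submits a
+ * TCP_CONNECT ramrod.
+ */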
+static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
+			      u32 num, int *work)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l4_kwq_connect_req1 *kwqe1 =
+		(struct l4_kwq_connect_req1 *) wqes[0];
+	struct l4_kwq_connect_req3 *kwqe3;
+	struct l5cm_active_conn_buffer *conn_buf;
+	struct l5cm_conn_addr_params *conn_addr;
+	union l5cm_specific_data l5_data;
+	u32 l5_cid = kwqe1->pg_cid;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+	int ret;
+
+	if (num < 2) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
+		*work = 3;
+	else
+		*work = 2;
+
+	if (num < *work) {
+		*work = num;
+		return -EINVAL;
+	}
+
+	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "conn_buf size too big\n");
+		return -ENOMEM;
+	}
+	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!conn_buf)
+		return -ENOMEM;
+
+	memset(conn_buf, 0, sizeof(*conn_buf));
+
+	conn_addr = &conn_buf->conn_addr_buf;
+	conn_addr->remote_addr_0 = csk->ha[0];
+	conn_addr->remote_addr_1 = csk->ha[1];
+	conn_addr->remote_addr_2 = csk->ha[2];
+	conn_addr->remote_addr_3 = csk->ha[3];
+	conn_addr->remote_addr_4 = csk->ha[4];
+	conn_addr->remote_addr_5 = csk->ha[5];
+
+	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
+		struct l4_kwq_connect_req2 *kwqe2 =
+			(struct l4_kwq_connect_req2 *) wqes[1];
+
+		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
+		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
+		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
+
+		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
+		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
+		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
+		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
+	}
+	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
+
+	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
+	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
+	conn_addr->local_tcp_port = kwqe1->src_port;
+	conn_addr->remote_tcp_port = kwqe1->dst_port;
+
+	conn_addr->pmtu = kwqe3->pmtu;
+	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
+
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
+
+	cnic_bnx2x_set_tcp_timestamp(dev,
+		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
+
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
+			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+	return ret;
+}
+
+static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
+	union l5cm_specific_data l5_data;
+	int ret;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
+			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
+	union l5cm_specific_data l5_data;
+	int ret;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
+			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
+	struct l4_kcq kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.pg_host_opaque = req->host_opaque;
+	kcqe.pg_cid = req->host_opaque;
+	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+	return 0;
+}
+
+static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
+	struct l4_kcq kcqe;
+	struct kcqe *cqes[1];
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.pg_host_opaque = req->pg_host_opaque;
+	kcqe.pg_cid = req->pg_cid;
+	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
+	return 0;
+}
+
+static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_stat *req;
+	struct fcoe_stat_ramrod_params *fcoe_stat;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	req = (struct fcoe_kwqe_stat *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_stat)
+		return -ENOMEM;
+
+	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
+	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
+
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 cid;
+	struct fcoe_init_ramrod_params *fcoe_init;
+	struct fcoe_kwqe_init1 *req1;
+	struct fcoe_kwqe_init2 *req2;
+	struct fcoe_kwqe_init3 *req3;
+	union l5cm_specific_data l5_data;
+
+	if (num < 3) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
+	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
+		*work = 1;
+		return -EINVAL;
+	}
+	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
+		*work = 2;
+		return -EINVAL;
+	}
+
+	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_init size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
+	if (!fcoe_init)
+		return -ENOMEM;
+
+	memset(fcoe_init, 0, sizeof(*fcoe_init));
+	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
+	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
+	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
+	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
+
+	fcoe_init->sb_num = cp->status_blk_num;
+	fcoe_init->eq_prod = MAX_KCQ_IDX;
+	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
+	cp->kcq2.sw_prod_idx = 0;
+
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	*work = 3;
+	return ret;
+}
+
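+/* Handle the four-KWQE FCoE connection offload request.  On any
+ * failure, reply with a CTX_ALLOC_FAILURE completion so the ULP is
+ * not left waiting.
+ */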
+static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
+				 u32 num, int *work)
+{
+	int ret = 0;
+	u32 cid = -1, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kwqe_conn_offload1 *req1;
+	struct fcoe_kwqe_conn_offload2 *req2;
+	struct fcoe_kwqe_conn_offload3 *req3;
+	struct fcoe_kwqe_conn_offload4 *req4;
+	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
+	struct cnic_context *ctx;
+	struct fcoe_context *fctx;
+	struct regpair ctx_addr;
+	union l5cm_specific_data l5_data;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	if (num < 4) {
+		*work = num;
+		return -EINVAL;
+	}
+	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
+	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
+	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
+	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
+
+	*work = 4;
+
+	l5_cid = req1->fcoe_conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		goto err_reply;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+		goto err_reply;
+
+	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
+	if (ret) {
+		ret = 0;
+		goto err_reply;
+	}
+	cid = ctx->cid;
+
+	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
+	if (fctx) {
+		u32 hw_cid = BNX2X_HW_CID(cp, cid);
+		u32 val;
+
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->xstorm_ag_context.cdu_reserved = val;
+		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
+					     FCOE_CONNECTION_TYPE);
+		fctx->ustorm_ag_context.cdu_usage = val;
+	}
+	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_offload size too big\n");
+		goto err_reply;
+	}
+	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_offload)
+		goto err_reply;
+
+	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
+	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
+	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
+	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
+	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
+
+	cid = BNX2X_HW_CID(cp, cid);
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (!ret)
+		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+
+	return ret;
+
+err_reply:
+	if (cid != -1)
+		cnic_free_bnx2x_conn_resc(dev, l5_cid);
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
+	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
+	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_enable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_enable)
+		return -ENOMEM;
+
+	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
+	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_enable_disable *req;
+	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
+		netdev_err(dev->netdev, "fcoe_disable size too big\n");
+		return -ENOMEM;
+	}
+	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
+	if (!fcoe_disable)
+		return -ENOMEM;
+
+	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
+	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
+static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_conn_destroy *req;
+	union l5cm_specific_data l5_data;
+	int ret;
+	u32 cid, l5_cid;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx;
+	struct fcoe_kcqe kcqe;
+	struct kcqe *cqes[1];
+
+	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
+	cid = req->context_id;
+	l5_cid = req->conn_id;
+	if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
+		return -EINVAL;
+
+	l5_cid += BNX2X_FCOE_L5_CID_BASE;
+
+	ctx = &cp->ctx_tbl[l5_cid];
+
+	init_waitqueue_head(&ctx->waitq);
+	ctx->wait_cond = 0;
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	if (ret == 0) {
+		wait_event(ctx->waitq, ctx->wait_cond);
+		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task,
+				   msecs_to_jiffies(2000));
+	}
+
+	memset(&kcqe, 0, sizeof(kcqe));
+	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
+	kcqe.fcoe_conn_id = req->conn_id;
+	kcqe.fcoe_conn_context_id = cid;
+
+	cqes[0] = (struct kcqe *) &kcqe;
+	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
+	return ret;
+}
+
+static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 i;
+
+	for (i = start_cid; i < cp->max_cid_space; i++) {
+		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int j;
+
+		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+			msleep(10);
+
+		for (j = 0; j < 5; j++) {
+			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+				break;
+			msleep(20);
+		}
+
+		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+			netdev_warn(dev->netdev, "CID %x not deleted\n",
+				   ctx->cid);
+	}
+}
+
+static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+	struct fcoe_kwqe_destroy *req;
+	union l5cm_specific_data l5_data;
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret;
+	u32 cid;
+
+	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
+
+	req = (struct fcoe_kwqe_destroy *) kwqe;
+	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
+
+	memset(&l5_data, 0, sizeof(l5_data));
+	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
+				  FCOE_CONNECTION_TYPE, &l5_data);
+	return ret;
+}
+
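+/* Dispatch a batch of iSCSI/L4 KWQEs to the per-opcode handlers.
+ * Multi-KWQE commands report how many entries they consumed through
+ * the work argument.
+ */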
+static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
+					 struct kwqe *wqes[], u32 num_wqes)
+{
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case ISCSI_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_INIT2:
+			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
+						     num_wqes - i, &work);
+			break;
+		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
+			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
+			break;
+		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_CONNECT1:
+			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
+						 &work);
+			break;
+		case L4_KWQE_OPCODE_VALUE_CLOSE:
+			ret = cnic_bnx2x_close(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_RESET:
+			ret = cnic_bnx2x_reset(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
+			ret = cnic_bnx2x_offload_pg(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
+			ret = cnic_bnx2x_update_pg(dev, kwqe);
+			break;
+		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
+			ret = 0;
+			break;
+		default:
+			ret = 0;
+			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+				   opcode);
+			break;
+		}
+		if (ret < 0)
+			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+				   opcode);
+		i += work;
+	}
+	return 0;
+}
+
+static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
+					struct kwqe *wqes[], u32 num_wqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, work, ret;
+	u32 opcode;
+	struct kwqe *kwqe;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+		return -EINVAL;
+
+	for (i = 0; i < num_wqes; ) {
+		kwqe = wqes[i];
+		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+		work = 1;
+
+		switch (opcode) {
+		case FCOE_KWQE_OPCODE_INIT1:
+			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
+			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
+						    num_wqes - i, &work);
+			break;
+		case FCOE_KWQE_OPCODE_ENABLE_CONN:
+			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DISABLE_CONN:
+			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY_CONN:
+			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_DESTROY:
+			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
+			break;
+		case FCOE_KWQE_OPCODE_STAT:
+			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
+			break;
+		default:
+			ret = 0;
+			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+				   opcode);
+			break;
+		}
+		if (ret < 0)
+			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+				   opcode);
+		i += work;
+	}
+	return 0;
+}
+
+static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+				   u32 num_wqes)
+{
+	int ret = -EINVAL;
+	u32 layer_code;
+
+	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EAGAIN;		/* bnx2x is down */
+
+	if (!num_wqes)
+		return 0;
+
+	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
+	switch (layer_code) {
+	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
+	case KWQE_FLAGS_LAYER_MASK_L4:
+	case KWQE_FLAGS_LAYER_MASK_L2:
+		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
+		break;
+
+	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
+		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
+		break;
+	}
+	return ret;
+}
+
+static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
+{
+	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
+		return KCQE_FLAGS_LAYER_MASK_L4;
+
+	return opflag & KCQE_FLAGS_LAYER_MASK;
+}
+
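+/* Hand completed KCQEs to the owning ULP in runs that share the same
+ * layer mask, and return any ramrod completions as SPQ credits.
+ */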
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i, j, comp = 0;
+
+	i = 0;
+	j = 1;
+	while (num_cqes) {
+		struct cnic_ulp_ops *ulp_ops;
+		int ulp_type;
+		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
+
+		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+			comp++;
+
+		while (j < num_cqes) {
+			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
+				break;
+
+			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+				comp++;
+			j++;
+		}
+
+		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+			ulp_type = CNIC_ULP_RDMA;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+			ulp_type = CNIC_ULP_ISCSI;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
+			ulp_type = CNIC_ULP_FCOE;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+			ulp_type = CNIC_ULP_L4;
+		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+			goto end;
+		else {
+			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
+				   kcqe_op_flag);
+			goto end;
+		}
+
+		rcu_read_lock();
+		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+		if (likely(ulp_ops)) {
+			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+						  cp->completed_kcq + i, j);
+		}
+		rcu_read_unlock();
+end:
+		num_cqes -= j;
+		i += j;
+		j = 1;
+	}
+	if (unlikely(comp))
+		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
+}
+
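+/* Gather new KCQEs from the hardware producer into completed_kcq[],
+ * stopping at the last entry without KCQE_FLAGS_NEXT so that a
+ * chained sequence is only handed up once it is complete.
+ */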
+static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u16 i, ri, hw_prod, last;
+	struct kcqe *kcqe;
+	int kcqe_cnt = 0, last_cnt = 0;
+
+	i = ri = last = info->sw_prod_idx;
+	ri &= MAX_KCQ_IDX;
+	hw_prod = *info->hw_prod_idx_ptr;
+	hw_prod = info->hw_idx(hw_prod);
+
+	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+		cp->completed_kcq[kcqe_cnt++] = kcqe;
+		i = info->next_idx(i);
+		ri = i & MAX_KCQ_IDX;
+		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+			last_cnt = kcqe_cnt;
+			last = i;
+		}
+	}
+
+	info->sw_prod_idx = last;
+	return last_cnt;
+}
+
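+/* Scan the bnx2x L2 completion ring for CLIENT_SETUP/HALT ramrod
+ * completions and return how many were found.
+ */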
+static int cnic_l2_completion(struct cnic_local *cp)
+{
+	u16 hw_cons, sw_cons;
+	struct cnic_uio_dev *udev = cp->udev;
+	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
+					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
+	u32 cmd;
+	int comp = 0;
+
+	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
+		return 0;
+
+	hw_cons = *cp->rx_cons_ptr;
+	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
+		hw_cons++;
+
+	sw_cons = cp->rx_cons;
+	while (sw_cons != hw_cons) {
+		u8 cqe_fp_flags;
+
+		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
+		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
+			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
+			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
+			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
+			    cmd == RAMROD_CMD_ID_ETH_HALT)
+				comp++;
+		}
+		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
+	}
+	return comp;
+}
+
+static void cnic_chk_pkt_rings(struct cnic_local *cp)
+{
+	u16 rx_cons, tx_cons;
+	int comp = 0;
+
+	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+		return;
+
+	rx_cons = *cp->rx_cons_ptr;
+	tx_cons = *cp->tx_cons_ptr;
+	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+			comp = cnic_l2_completion(cp);
+
+		cp->tx_cons = tx_cons;
+		cp->rx_cons = rx_cons;
+
+		if (cp->udev)
+			uio_event_notify(&cp->udev->cnic_uinfo);
+	}
+	if (comp)
+		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+}
+
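+/* Drain kcq1, re-reading the status block index between passes, then
+ * write back the new software producer index.
+ */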
+static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
+	int kcqe_cnt;
+
+	/* status block index must be read before reading other fields */
+	rmb();
+	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that status_blk fields can change. */
+		barrier();
+		status_idx = (u16) *cp->kcq1.status_idx_ptr;
+		/* status block index must be read first */
+		rmb();
+		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+	}
+
+	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
+
+	cnic_chk_pkt_rings(cp);
+
+	return status_idx;
+}
+
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		struct status_block *sblk = status_blk;
+
+		return sblk->status_idx;
+	}
+
+	return cnic_service_bnx2_queues(dev);
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cp->last_status_idx = cnic_service_bnx2_queues(dev);
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_doirq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
+		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
+
+		prefetch(cp->status_blk.gen);
+		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+		tasklet_schedule(&cp->cnic_irq_task);
+	}
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+	struct cnic_dev *dev = dev_instance;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (cp->ack_int)
+		cp->ack_int(dev);
+
+	cnic_doirq(dev);
+
+	return IRQ_HANDLED;
+}
+
+static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
+				      u16 index, u8 op, u8 update)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
+		       COMMAND_REG_INT_ACK);
+	struct igu_ack_register igu_ack;
+
+	igu_ack.status_block_index = index;
+	igu_ack.sb_id_and_flags =
+			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
+			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
+			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
+			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
+
+	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
+}
+
+static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
+			    u16 index, u8 op, u8 update)
+{
+	struct igu_regular cmd_data;
+	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
+
+	cmd_data.sb_id_and_flags =
+		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
+		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
+		(update << IGU_REGULAR_BUPDATE_SHIFT) |
+		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
+
+	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
+}
+
+static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
+			   IGU_INT_DISABLE, 0);
+}
+
+static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
+			IGU_INT_DISABLE, 0);
+}
+
+static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
+{
+	u32 last_status = *info->status_idx_ptr;
+	int kcqe_cnt;
+
+	/* status block index must be read before reading the KCQ */
+	rmb();
+	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
+
+		service_kcqes(dev, kcqe_cnt);
+
+		/* Tell compiler that sblk fields can change. */
+		barrier();
+
+		last_status = *info->status_idx_ptr;
+		/* status block index must be read before reading the KCQ */
+		rmb();
+	}
+	return last_status;
+}
+
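+/* Bottom half for bnx2x devices: drain kcq1 (and kcq2 on E2-plus
+ * chips), then re-arm the status block via the IGU.
+ */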
+static void cnic_service_bnx2x_bh(unsigned long data)
+{
+	struct cnic_dev *dev = (struct cnic_dev *) data;
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 status_idx, new_status_idx;
+
+	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+		return;
+
+	while (1) {
+		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
+
+		CNIC_WR16(dev, cp->kcq1.io_addr,
+			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
+
+		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
+					   status_idx, IGU_INT_ENABLE, 1);
+			break;
+		}
+
+		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
+
+		if (new_status_idx != status_idx)
+			continue;
+
+		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
+			  MAX_KCQ_IDX);
+
+		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
+				status_idx, IGU_INT_ENABLE, 1);
+
+		break;
+	}
+}
+
+static int cnic_service_bnx2x(void *data, void *status_blk)
+{
+	struct cnic_dev *dev = data;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		cnic_doirq(dev);
+
+	cnic_chk_pkt_rings(cp);
+
+	return 0;
+}
+
+static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
+{
+	struct cnic_ulp_ops *ulp_ops;
+
+	if (if_type == CNIC_ULP_ISCSI)
+		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
+	mutex_lock(&cnic_lock);
+	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+					    lockdep_is_held(&cnic_lock));
+	if (!ulp_ops) {
+		mutex_unlock(&cnic_lock);
+		return;
+	}
+	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+	mutex_unlock(&cnic_lock);
+
+	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+}
+
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
+		cnic_ulp_stop_one(cp, if_type);
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int if_type;
+
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		mutex_lock(&cnic_lock);
+		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+						    lockdep_is_held(&cnic_lock));
+		if (!ulp_ops || !ulp_ops->cnic_start) {
+			mutex_unlock(&cnic_lock);
+			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
+
+		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+	}
+}
+
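+/* Control callback invoked by the ethernet driver: handles start/stop,
+ * iSCSI stop requests and CFC-delete completion notifications.
+ */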
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+	struct cnic_dev *dev = data;
+
+	switch (info->cmd) {
+	case CNIC_CTL_STOP_CMD:
+		cnic_hold(dev);
+
+		cnic_ulp_stop(dev);
+		cnic_stop_hw(dev);
+
+		cnic_put(dev);
+		break;
+	case CNIC_CTL_START_CMD:
+		cnic_hold(dev);
+
+		if (!cnic_start_hw(dev))
+			cnic_ulp_start(dev);
+
+		cnic_put(dev);
+		break;
+	case CNIC_CTL_STOP_ISCSI_CMD: {
+		struct cnic_local *cp = dev->cnic_priv;
+		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
+		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
+		break;
+	}
+	case CNIC_CTL_COMPLETION_CMD: {
+		struct cnic_ctl_completion *comp = &info->data.comp;
+		u32 cid = BNX2X_SW_CID(comp->cid);
+		u32 l5_cid;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
+			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+			if (unlikely(comp->error)) {
+				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
+				netdev_err(dev->netdev,
+					   "CID %x CFC delete comp error %x\n",
+					   cid, comp->error);
+			}
+
+			ctx->wait_cond = 1;
+			wake_up(&ctx->waitq);
+		}
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl_prot(i);
+		if (!ulp_ops || !ulp_ops->cnic_init) {
+			mutex_unlock(&cnic_lock);
+			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
+
+		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_init(dev);
+
+		ulp_put(ulp_ops);
+	}
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+	int i;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+		struct cnic_ulp_ops *ulp_ops;
+
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl_prot(i);
+		if (!ulp_ops || !ulp_ops->cnic_exit) {
+			mutex_unlock(&cnic_lock);
+			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
+
+		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+			ulp_ops->cnic_exit(dev);
+
+		ulp_put(ulp_ops);
+	}
+}
+
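+/* Build and submit an OFFLOAD_PG KWQE carrying the L2 header (MAC
+ * addresses and optional VLAN tag) for this socket.
+ */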
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_offload_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+	l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->sa0 = dev->mac_addr[0];
+	l4kwqe->sa1 = dev->mac_addr[1];
+	l4kwqe->sa2 = dev->mac_addr[2];
+	l4kwqe->sa3 = dev->mac_addr[3];
+	l4kwqe->sa4 = dev->mac_addr[4];
+	l4kwqe->sa5 = dev->mac_addr[5];
+
+	l4kwqe->etype = ETH_P_IP;
+	l4kwqe->ipid_start = DEF_IPID_START;
+	l4kwqe->host_opaque = csk->l5_cid;
+
+	if (csk->vlan_id) {
+		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+		l4kwqe->vlan_tag = csk->vlan_id;
+		l4kwqe->l2hdr_nbytes += 4;
+	}
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_update_pg *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+	l4kwqe->pg_cid = csk->pg_cid;
+
+	l4kwqe->da0 = csk->ha[0];
+	l4kwqe->da1 = csk->ha[1];
+	l4kwqe->da2 = csk->ha[2];
+	l4kwqe->da3 = csk->ha[3];
+	l4kwqe->da4 = csk->ha[4];
+	l4kwqe->da5 = csk->ha[5];
+
+	l4kwqe->pg_host_opaque = csk->l5_cid;
+	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_upload *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+	l4kwqe->flags =
+		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->pg_cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_connect_req1 *l4kwqe1;
+	struct l4_kwq_connect_req2 *l4kwqe2;
+	struct l4_kwq_connect_req3 *l4kwqe3;
+	struct kwqe *wqes[3];
+	u8 tcp_flags = 0;
+	int num_wqes = 2;
+
+	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+	l4kwqe3->flags =
+		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+	l4kwqe3->ka_timeout = csk->ka_timeout;
+	l4kwqe3->ka_interval = csk->ka_interval;
+	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+	l4kwqe3->tos = csk->tos;
+	l4kwqe3->ttl = csk->ttl;
+	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+	l4kwqe3->pmtu = csk->mtu;
+	l4kwqe3->rcv_buf = csk->rcv_buf;
+	l4kwqe3->snd_buf = csk->snd_buf;
+	l4kwqe3->seed = csk->seed;
+
+	wqes[0] = (struct kwqe *) l4kwqe1;
+	if (test_bit(SK_F_IPV6, &csk->flags)) {
+		wqes[1] = (struct kwqe *) l4kwqe2;
+		wqes[2] = (struct kwqe *) l4kwqe3;
+		num_wqes = 3;
+
+		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+		l4kwqe2->flags =
+			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+			       sizeof(struct tcphdr);
+	} else {
+		wqes[1] = (struct kwqe *) l4kwqe3;
+		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+			       sizeof(struct tcphdr);
+	}
+
+	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+	l4kwqe1->flags =
+		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
+	l4kwqe1->cid = csk->cid;
+	l4kwqe1->pg_cid = csk->pg_cid;
+	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+	if (csk->tcp_flags & SK_TCP_NAGLE)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+	if (csk->tcp_flags & SK_TCP_SACK)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+	l4kwqe1->tcp_flags = tcp_flags;
+
+	return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_close_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct l4_kwq_reset_req *l4kwqe;
+	struct kwqe *wqes[1];
+
+	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+	memset(l4kwqe, 0, sizeof(*l4kwqe));
+	wqes[0] = (struct kwqe *) l4kwqe;
+
+	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+	l4kwqe->cid = csk->cid;
+
+	return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+			  u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_sock *csk1;
+
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return -EINVAL;
+
+	if (cp->ctx_tbl) {
+		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
+			return -EAGAIN;
+	}
+
+	csk1 = &cp->csk_tbl[l5_cid];
+	if (atomic_read(&csk1->ref_count))
+		return -EAGAIN;
+
+	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+		return -EBUSY;
+
+	csk1->dev = dev;
+	csk1->cid = cid;
+	csk1->l5_cid = l5_cid;
+	csk1->ulp_type = ulp_type;
+	csk1->context = context;
+
+	csk1->ka_timeout = DEF_KA_TIMEOUT;
+	csk1->ka_interval = DEF_KA_INTERVAL;
+	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+	csk1->tos = DEF_TOS;
+	csk1->ttl = DEF_TTL;
+	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+	csk1->rcv_buf = DEF_RCV_BUF;
+	csk1->snd_buf = DEF_SND_BUF;
+	csk1->seed = DEF_SEED;
+
+	*csk = csk1;
+	return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+	if (csk->src_port) {
+		struct cnic_dev *dev = csk->dev;
+		struct cnic_local *cp = dev->cnic_priv;
+
+		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
+		csk->src_port = 0;
+	}
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+		cnic_cm_upload_pg(csk);
+		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	}
+	cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	csk_hold(csk);
+	clear_bit(SK_F_INUSE, &csk->flags);
+	smp_mb__after_clear_bit();
+	while (atomic_read(&csk->ref_count) != 1)
+		msleep(1);
+	cnic_cm_cleanup(csk);
+
+	csk->flags = 0;
+	csk_put(csk);
+	return 0;
+}
+
+static inline u16 cnic_get_vlan(struct net_device *dev,
+				struct net_device **vlan_dev)
+{
+	if (dev->priv_flags & IFF_802_1Q_VLAN) {
+		*vlan_dev = vlan_dev_real_dev(dev);
+		return vlan_dev_vlan_id(dev);
+	}
+	*vlan_dev = dev;
+	return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+			     struct dst_entry **dst)
+{
+#if defined(CONFIG_INET)
+	struct rtable *rt;
+
+	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
+	if (!IS_ERR(rt)) {
+		*dst = &rt->dst;
+		return 0;
+	}
+	return PTR_ERR(rt);
+#else
+	return -ENETUNREACH;
+#endif
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+			     struct dst_entry **dst)
+{
+#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
+	struct flowi6 fl6;
+
+	memset(&fl6, 0, sizeof(fl6));
+	ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
+		fl6.flowi6_oif = dst_addr->sin6_scope_id;
+
+	*dst = ip6_route_output(&init_net, NULL, &fl6);
+	if (*dst)
+		return 0;
+#endif
+
+	return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+					   int ulp_type)
+{
+	struct cnic_dev *dev = NULL;
+	struct dst_entry *dst;
+	struct net_device *netdev = NULL;
+	int err = -ENETUNREACH;
+
+	if (dst_addr->sin_family == AF_INET)
+		err = cnic_get_v4_route(dst_addr, &dst);
+	else if (dst_addr->sin_family == AF_INET6) {
+		struct sockaddr_in6 *dst_addr6 =
+			(struct sockaddr_in6 *) dst_addr;
+
+		err = cnic_get_v6_route(dst_addr6, &dst);
+	} else
+		return NULL;
+
+	if (err)
+		return NULL;
+
+	if (!dst->dev)
+		goto done;
+
+	cnic_get_vlan(dst->dev, &netdev);
+
+	dev = cnic_from_netdev(netdev);
+
+done:
+	dst_release(dst);
+	if (dev)
+		cnic_put(dev);
+	return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
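+/* Resolve the route to the destination, record the VLAN and MTU when
+ * it egresses through our netdev, and reserve a local TCP port for
+ * the socket.
+ */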
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	int is_v6, rc = 0;
+	struct dst_entry *dst = NULL;
+	struct net_device *realdev;
+	__be16 local_port;
+	u32 port_id;
+
+	if (saddr->local.v6.sin6_family == AF_INET6 &&
+	    saddr->remote.v6.sin6_family == AF_INET6)
+		is_v6 = 1;
+	else if (saddr->local.v4.sin_family == AF_INET &&
+		 saddr->remote.v4.sin_family == AF_INET)
+		is_v6 = 0;
+	else
+		return -EINVAL;
+
+	clear_bit(SK_F_IPV6, &csk->flags);
+
+	if (is_v6) {
+		set_bit(SK_F_IPV6, &csk->flags);
+		cnic_get_v6_route(&saddr->remote.v6, &dst);
+
+		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+		       sizeof(struct in6_addr));
+		csk->dst_port = saddr->remote.v6.sin6_port;
+		local_port = saddr->local.v6.sin6_port;
+
+	} else {
+		cnic_get_v4_route(&saddr->remote.v4, &dst);
+
+		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+		csk->dst_port = saddr->remote.v4.sin_port;
+		local_port = saddr->local.v4.sin_port;
+	}
+
+	csk->vlan_id = 0;
+	csk->mtu = dev->netdev->mtu;
+	if (dst && dst->dev) {
+		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
+		if (realdev == dev->netdev) {
+			csk->vlan_id = vlan;
+			csk->mtu = dst_mtu(dst);
+		}
+	}
+
+	port_id = be16_to_cpu(local_port);
+	if (port_id >= CNIC_LOCAL_PORT_MIN &&
+	    port_id < CNIC_LOCAL_PORT_MAX) {
+		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
+			port_id = 0;
+	} else
+		port_id = 0;
+
+	if (!port_id) {
+		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
+		if (port_id == -1) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		local_port = cpu_to_be16(port_id);
+	}
+	csk->src_port = local_port;
+
+err_out:
+	dst_release(dst);
+	return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+	csk->state = 0;
+	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+	clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+	struct cnic_local *cp = csk->dev->cnic_priv;
+	int err = 0;
+
+	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
+		return -EOPNOTSUPP;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+		return -EINVAL;
+
+	cnic_init_csk_state(csk);
+
+	err = cnic_get_route(csk, saddr);
+	if (err)
+		goto err_out;
+
+	err = cnic_resolve_addr(csk, saddr);
+	if (!err)
+		return 0;
+
+err_out:
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+	struct cnic_local *cp = csk->dev->cnic_priv;
+	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_abort_prep(csk))
+		return cnic_cm_abort_req(csk);
+
+	/* Getting here means that we haven't started connect, or
+	 * connect was not successful.
+	 */
+
+	cp->close_conn(csk, opcode);
+	if (csk->state != opcode)
+		return -EALREADY;
+
+	return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+	if (!cnic_in_use(csk))
+		return -EINVAL;
+
+	if (cnic_close_prep(csk)) {
+		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+		return cnic_cm_close_req(csk);
+	}
+	return -EALREADY;
+}
+
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+			   u8 opcode)
+{
+	struct cnic_ulp_ops *ulp_ops;
+	int ulp_type = csk->ulp_type;
+
+	rcu_read_lock();
+	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+	if (ulp_ops) {
+		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+			ulp_ops->cm_connect_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			ulp_ops->cm_close_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+			ulp_ops->cm_remote_abort(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+			ulp_ops->cm_abort_complete(csk);
+		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+			ulp_ops->cm_remote_close(csk);
+	}
+	rcu_read_unlock();
+}
+
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+	if (cnic_offld_prep(csk)) {
+		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+			cnic_cm_update_pg(csk);
+		else
+			cnic_cm_offload_pg(csk);
+	}
+	return 0;
+}
+
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 l5_cid = kcqe->pg_host_opaque;
+	u8 opcode = kcqe->op_code;
+	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+	csk_hold(csk);
+	if (!cnic_in_use(csk))
+		goto done;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		goto done;
+	}
+	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
+	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		cnic_cm_upcall(cp, csk,
+			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+		goto done;
+	}
+
+	csk->pg_cid = kcqe->pg_cid;
+	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+	cnic_cm_conn_req(csk);
+
+done:
+	csk_put(csk);
+}
+
+static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
+	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
+	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
+
+	ctx->timestamp = jiffies;
+	ctx->wait_cond = 1;
+	wake_up(&ctx->waitq);
+}
+
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+	u8 opcode = l4kcqe->op_code;
+	u32 l5_cid;
+	struct cnic_sock *csk;
+
+	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
+		cnic_process_fcoe_term_conn(dev, kcqe);
+		return;
+	}
+	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+		cnic_cm_process_offld_pg(dev, l4kcqe);
+		return;
+	}
+
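+	/* Note (added): opcodes with bit 7 set appear to be ramrod
+	 * completions; these carry the L5 connection id in the cid field
+	 * rather than in conn_id.
+	 */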
+	l5_cid = l4kcqe->conn_id;
+	if (opcode & 0x80)
+		l5_cid = l4kcqe->cid;
+	if (l5_cid >= MAX_CM_SK_TBL_SZ)
+		return;
+
+	csk = &cp->csk_tbl[l5_cid];
+	csk_hold(csk);
+
+	if (!cnic_in_use(csk)) {
+		csk_put(csk);
+		return;
+	}
+
+	switch (opcode) {
+	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
+		if (l4kcqe->status != 0) {
+			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+			cnic_cm_upcall(cp, csk,
+				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+		}
+		break;
+	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+		if (l4kcqe->status == 0)
+			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+
+		smp_mb__before_clear_bit();
+		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+		cnic_cm_upcall(cp, csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+		cp->close_conn(csk, opcode);
+		break;
+
+	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+		/* after we already sent CLOSE_REQ */
+		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
+		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
+		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
+		else
+			cnic_cm_upcall(cp, csk, opcode);
+		break;
+	}
+	csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+	struct cnic_dev *dev = data;
+	int i;
+
+	for (i = 0; i < num; i++)
+		cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+	.indicate_kcqes		= cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	kfree(cp->csk_tbl);
+	cp->csk_tbl = NULL;
+	cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 port_id;
+
+	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+			      GFP_KERNEL);
+	if (!cp->csk_tbl)
+		return -ENOMEM;
+
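+	/* Note (added): seed the port table at a random offset within the
+	 * range so that successive driver loads do not keep starting from
+	 * the same source port.
+	 */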
+	port_id = random32();
+	port_id %= CNIC_LOCAL_PORT_RANGE;
+	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+			     CNIC_LOCAL_PORT_MIN, port_id)) {
+		cnic_cm_free_mem(dev);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+		/* Unsolicited RESET_COMP or RESET_RECEIVED */
+		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+		csk->state = opcode;
+	}
+
+	/* 1. If the event opcode matches the expected event in csk->state
+	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
+	 *    event
+	 * 3. If the expected event is 0, meaning the connection was never
+	 *    established, we accept the opcode from cm_abort.
+	 */
+	if (opcode == csk->state || csk->state == 0 ||
+	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
+	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
+			if (csk->state == 0)
+				csk->state = opcode;
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
+		cnic_cm_upcall(cp, csk, opcode);
+		return;
+	}
+
+	clear_bit(SK_F_CONNECT_START, &csk->flags);
+	cnic_close_conn(csk);
+	csk->state = opcode;
+	cnic_cm_upcall(cp, csk, opcode);
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+	u32 seed;
+
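+	/* Note (added): a random seed is written into context 45,
+	 * presumably consumed by firmware (e.g. for TCP sequence number
+	 * generation).
+	 */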
+	seed = random32();
+	cnic_ctx_wr(dev, 45, 0, seed);
+	return 0;
+}
+
+static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
+{
+	struct cnic_dev *dev = csk->dev;
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
+	union l5cm_specific_data l5_data;
+	u32 cmd = 0;
+	int close_complete = 0;
+
+	switch (opcode) {
+	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+		if (cnic_ready_to_close(csk, opcode)) {
+			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
+			else
+				close_complete = 1;
+		}
+		break;
+	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
+		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+		break;
+	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+		close_complete = 1;
+		break;
+	}
+	if (cmd) {
+		memset(&l5_data, 0, sizeof(l5_data));
+
+		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
+				    &l5_data);
+	} else if (close_complete) {
+		ctx->timestamp = jiffies;
+		cnic_close_conn(csk);
+		cnic_cm_upcall(cp, csk, csk->state);
+	}
+}
+
+static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	if (!cp->ctx_tbl)
+		return;
+
+	if (!netif_running(dev->netdev))
+		return;
+
+	cnic_bnx2x_delete_wait(dev, 0);
+
+	cancel_delayed_work(&cp->delete_task);
+	flush_workqueue(cnic_wq);
+
+	if (atomic_read(&cp->iscsi_conn) != 0)
+		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+			    atomic_read(&cp->iscsi_conn));
+}
+
+static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 pfid = cp->pfid;
+	u32 port = CNIC_PORT(cp);
+
+	cnic_init_bnx2x_mac(dev);
+	cnic_bnx2x_set_tcp_timestamp(dev, 1);
+
+	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
+		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
+
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
+		DEF_MAX_DA_COUNT);
+
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
+	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
+		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
+	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
+		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
+		DEF_MAX_CWND);
+	return 0;
+}
+
+static void cnic_delete_task(struct work_struct *work)
+{
+	struct cnic_local *cp;
+	struct cnic_dev *dev;
+	u32 i;
+	int need_resched = 0;
+
+	cp = container_of(work, struct cnic_local, delete_task.work);
+	dev = cp->dev;
+
+	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
+		struct drv_ctl_info info;
+
+		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
+
+		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
+		cp->ethdev->drv_ctl(dev->netdev, &info);
+	}
+
+	for (i = 0; i < cp->max_cid_space; i++) {
+		struct cnic_context *ctx = &cp->ctx_tbl[i];
+		int err;
+
+		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
+		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+			continue;
+
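+		/* Note (added): allow a ~2 second grace period after the
+		 * last state change before issuing the destroy ramrod.
+		 */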
+		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
+			need_resched = 1;
+			continue;
+		}
+
+		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
+			continue;
+
+		err = cnic_bnx2x_destroy_ramrod(dev, i);
+
+		cnic_free_bnx2x_conn_resc(dev, i);
+		if (!err) {
+			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
+				atomic_dec(&cp->iscsi_conn);
+
+			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
+		}
+	}
+
+	if (need_resched)
+		queue_delayed_work(cnic_wq, &cp->delete_task,
+				   msecs_to_jiffies(10));
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int err;
+
+	err = cnic_cm_alloc_mem(dev);
+	if (err)
+		return err;
+
+	err = cp->start_cm(dev);
+
+	if (err)
+		goto err_out;
+
+	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
+
+	dev->cm_create = cnic_cm_create;
+	dev->cm_destroy = cnic_cm_destroy;
+	dev->cm_connect = cnic_cm_connect;
+	dev->cm_abort = cnic_cm_abort;
+	dev->cm_close = cnic_cm_close;
+	dev->cm_select_dev = cnic_cm_select_dev;
+
+	cp->ulp_handle[CNIC_ULP_L4] = dev;
+	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+	return 0;
+
+err_out:
+	cnic_cm_free_mem(dev);
+	return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int i;
+
+	cp->stop_cm(dev);
+
+	if (!cp->csk_tbl)
+		return 0;
+
+	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+		struct cnic_sock *csk = &cp->csk_tbl[i];
+
+		clear_bit(SK_F_INUSE, &csk->flags);
+		cnic_cm_cleanup(csk);
+	}
+	cnic_cm_free_mem(dev);
+
+	return 0;
+}
+
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+	u32 cid_addr;
+	int i;
+
+	cid_addr = GET_CID_ADDR(cid);
+
+	for (i = 0; i < CTX_SIZE; i += 4)
+		cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	int ret = 0, i;
+	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		return 0;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		int j;
+		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+		u32 val;
+
+		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+			(u64) cp->ctx_arr[i].mapping >> 32);
+		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
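+		/* Note (added): poll (up to 10 times, 5us apart) for the
+		 * hardware to clear WRITE_REQ, indicating the page-table
+		 * entry has been accepted.
+		 */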
+		for (j = 0; j < 10; j++) {
+			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+				break;
+			udelay(5);
+		}
+		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+			ret = -EBUSY;
+			break;
+		}
+	}
+	return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		cp->disable_int_sync(dev);
+		tasklet_kill(&cp->cnic_irq_task);
+		free_irq(ethdev->irq_arr[0].vector, dev);
+	}
+}
+
+static int cnic_request_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
+	if (err)
+		tasklet_disable(&cp->cnic_irq_task);
+
+	return err;
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		int err, i = 0;
+		int sblk_num = cp->status_blk_num;
+		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+			   BNX2_HC_SB_CONFIG_1;
+
+		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
+		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
+			     (unsigned long) dev);
+		err = cnic_request_irq(dev);
+		if (err)
+			return err;
+
+		while (cp->status_blk.bnx2->status_completion_producer_index &&
+		       i < 10) {
+			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+				1 << (11 + sblk_num));
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (cp->status_blk.bnx2->status_completion_producer_index) {
+			cnic_free_irq(dev);
+			goto failed;
+		}
+	} else {
+		struct status_block *sblk = cp->status_blk.gen;
+		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+		int i = 0;
+
+		while (sblk->status_completion_producer_index && i < 10) {
+			CNIC_WR(dev, BNX2_HC_COMMAND,
+				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+			udelay(10);
+			i++;
+			barrier();
+		}
+		if (sblk->status_completion_producer_index)
+			goto failed;
+	}
+	return 0;
+
+failed:
+	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
+	return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+		return;
+
+	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+	synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct cnic_uio_dev *udev = cp->udev;
+	u32 cid_addr, tx_cid, sb_id;
+	u32 val, offset0, offset1, offset2, offset3;
+	int i;
+	struct tx_bd *txbd;
+	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
+	struct status_block *s_blk = cp->status_blk.gen;
+
+	sb_id = cp->status_blk_num;
+	tx_cid = 20;
+	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk.bnx2;
+
+		tx_cid = TX_TSS_CID + sb_id - 1;
+		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+			(TX_TSS_CID << 7));
+		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+	}
+	cp->tx_cons = *cp->tx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(tx_cid);
+	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+		for (i = 0; i < PHY_CTX_SIZE; i += 4)
+			cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+		offset0 = BNX2_L2CTX_TYPE_XI;
+		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+	} else {
+		cnic_init_context(dev, tx_cid);
+		cnic_init_context(dev, tx_cid + 1);
+
+		offset0 = BNX2_L2CTX_TYPE;
+		offset1 = BNX2_L2CTX_CMD_TYPE;
+		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+	}
+	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+	cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+	cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+	txbd = udev->l2_ring;
+
+	buf_map = udev->l2_buf_map;
+	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
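+	/* Note (added): the final BD slot in the page acts as a chain
+	 * pointer back to the start of the ring, making it circular.
+	 */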
+	val = (u64) ring_map >> 32;
+	cnic_ctx_wr(dev, cid_addr, offset2, val);
+	txbd->tx_bd_haddr_hi = val;
+
+	val = (u64) ring_map & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, offset3, val);
+	txbd->tx_bd_haddr_lo = val;
+}
+
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct cnic_uio_dev *udev = cp->udev;
+	u32 cid_addr, sb_id, val, coal_reg, coal_val;
+	int i;
+	struct rx_bd *rxbd;
+	struct status_block *s_blk = cp->status_blk.gen;
+	dma_addr_t ring_map = udev->l2_ring_map;
+
+	sb_id = cp->status_blk_num;
+	cnic_init_context(dev, 2);
+	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+	coal_reg = BNX2_HC_COMMAND;
+	coal_val = CNIC_RD(dev, coal_reg);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *sblk = cp->status_blk.bnx2;
+
+		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+		coal_reg = BNX2_HC_COALESCE_NOW;
+		coal_val = 1 << (11 + sb_id);
+	}
+	i = 0;
+	while (*cp->rx_cons_ptr == 0 && i < 10) {
+		CNIC_WR(dev, coal_reg, coal_val);
+		udelay(10);
+		i++;
+		barrier();
+	}
+	cp->rx_cons = *cp->rx_cons_ptr;
+
+	cid_addr = GET_CID_ADDR(2);
+	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+	if (sb_id == 0)
+		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
+	else
+		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
+	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->rx_bd_len = cp->l2_single_buf_size;
+		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+	}
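+	/* Note (added): as with the TX ring, the last BD chains back to
+	 * the start of the RX page.
+	 */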
+	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+	rxbd->rx_bd_haddr_hi = val;
+
+	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+	rxbd->rx_bd_haddr_lo = val;
+
+	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+	struct kwqe *wqes[1], l2kwqe;
+
+	memset(&l2kwqe, 0, sizeof(l2kwqe));
+	wqes[0] = &l2kwqe;
+	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
+			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
+			       KWQE_OPCODE_SHIFT) | 2;
+	dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 val;
+
+	val = cp->func << 2;
+
+	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+	dev->mac_addr[0] = (u8) (val >> 8);
+	dev->mac_addr[1] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+	val = cnic_reg_rd_ind(dev, cp->shmem_base +
+			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+	dev->mac_addr[2] = (u8) (val >> 24);
+	dev->mac_addr[3] = (u8) (val >> 16);
+	dev->mac_addr[4] = (u8) (val >> 8);
+	dev->mac_addr[5] = (u8) val;
+
+	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+	if (CHIP_NUM(cp) != CHIP_NUM_5709)
+		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	struct status_block *sblk = cp->status_blk.gen;
+	u32 val, kcq_cid_addr, kwq_cid_addr;
+	int err;
+
+	cnic_set_bnx2_mac(dev);
+
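+	/* Note (added): program the MQ kernel-bypass block size from the
+	 * host page size, capped at 4K (page bits of 12).
+	 */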
+	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+	if (BCM_PAGE_BITS > 12)
+		val |= (12 - 8)  << 4;
+	else
+		val |= (BCM_PAGE_BITS - 8)  << 4;
+
+	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+	err = cnic_setup_5709_context(dev, 1);
+	if (err)
+		return err;
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->max_kwq_idx = MAX_KWQ_IDX;
+	cp->kwq_prod_idx = 0;
+	cp->kwq_con_idx = 0;
+	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
+
+	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+	else
+		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+	/* Initialize the kernel work queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kwq_info.pgtbl_map;
+	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+	cp->kcq1.sw_prod_idx = 0;
+	cp->kcq1.hw_prod_idx_ptr =
+		(u16 *) &sblk->status_completion_producer_index;
+
+	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
+
+	/* Initialize the kernel complete queue context. */
+	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+	val = (u32) cp->kcq1.dma.pgtbl_map;
+	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+	cp->int_num = 0;
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+		struct status_block_msix *msblk = cp->status_blk.bnx2;
+		u32 sb_id = cp->status_blk_num;
+		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
+
+		cp->kcq1.hw_prod_idx_ptr =
+			(u16 *) &msblk->status_completion_producer_index;
+		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
+		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
+		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+	}
+
+	/* Enable Command Scheduler notification when we write to the
+	 * host producer index of the kernel contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+	/* Enable Command Scheduler notification when we write to either
+	 * the Send Queue or Receive Queue producer indexes of the kernel
+	 * bypass contexts. */
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+	/* Notify COM when the driver posts an application buffer. */
+	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+	/* Set the CP and COM doorbells.  These two processors poll the
+	 * doorbell for a non-zero value before running.  This must be done
+	 * after setting up the kernel queue contexts. */
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+	cnic_init_bnx2_tx_ring(dev);
+	cnic_init_bnx2_rx_ring(dev);
+
+	err = cnic_init_bnx2_irq(dev);
+	if (err) {
+		netdev_err(dev->netdev, "cnic_init_irq failed\n");
+		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+		return err;
+	}
+
+	return 0;
+}
+
+static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	u32 start_offset = ethdev->ctx_tbl_offset;
+	int i;
+
+	for (i = 0; i < cp->ctx_blks; i++) {
+		struct cnic_ctx *ctx = &cp->ctx_arr[i];
+		dma_addr_t map = ctx->mapping;
+
+		if (cp->ctx_align) {
+			unsigned long mask = cp->ctx_align - 1;
+
+			map = (map + mask) & ~mask;
+		}
+
+		cnic_ctx_tbl_wr(dev, start_offset + i, map);
+	}
+}
+
+static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err = 0;
+
+	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
+		     (unsigned long) dev);
+	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+		err = cnic_request_irq(dev);
+
+	return err;
+}
+
+static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
+						u16 sb_id, u8 sb_index,
+						u8 disable)
+{
+	u32 addr = BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+			offsetof(struct hc_status_block_data_e1x, index_data) +
+			sizeof(struct hc_index_data) * sb_index +
+			offsetof(struct hc_index_data, flags);
+	u16 flags = CNIC_RD16(dev, addr);
+	/* clear and set */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
+		  HC_INDEX_DATA_HC_ENABLED);
+	CNIC_WR16(dev, addr, flags);
+}
+
+static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u8 sb_id = cp->status_blk_num;
+
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
+			offsetof(struct hc_status_block_data_e1x, index_data) +
+			sizeof(struct hc_index_data) * HC_INDEX_ISCSI_EQ_CONS +
+			offsetof(struct hc_index_data, timeout), 64 / 4);
+	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
+}
+
+static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
+{
+}
+
+static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
+				    struct client_init_ramrod_data *data)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
+	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
+	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
+	int i;
+	u32 cli = cp->ethdev->iscsi_l2_client_id;
+	u32 val;
+
+	memset(txbd, 0, BCM_PAGE_SIZE);
+
+	buf_map = udev->l2_buf_map;
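+	/* Note (added): each pre-built packet slot spans three BDs: a
+	 * start BD, a middle BD (left zeroed here), and a regular BD;
+	 * nbd below is set to 3 to match.
+	 */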
+	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
+		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
+		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
+
+		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+		reg_bd->addr_hi = start_bd->addr_hi;
+		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
+		start_bd->nbytes = cpu_to_le16(0x10);
+		start_bd->nbd = cpu_to_le16(3);
+		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+		start_bd->general_data = (UNICAST_ADDRESS <<
+			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
+		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+	}
+
+	val = (u64) ring_map >> 32;
+	txbd->next_bd.addr_hi = cpu_to_le32(val);
+
+	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
+
+	val = (u64) ring_map & 0xffffffff;
+	txbd->next_bd.addr_lo = cpu_to_le32(val);
+
+	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
+
+	/* Other ramrod params */
+	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
+	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
+
+	/* reset xstorm per client statistics */
+	if (cli < MAX_STAT_COUNTER_ID) {
+		data->general.statistics_zero_flg = 1;
+		data->general.statistics_en_flg = 1;
+		data->general.statistics_counter_id = cli;
+	}
+
+	cp->tx_cons_ptr =
+		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
+}
+
+static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
+				    struct client_init_ramrod_data *data)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
+				BCM_PAGE_SIZE);
+	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
+				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
+	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
+	int i;
+	u32 cli = cp->ethdev->iscsi_l2_client_id;
+	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+	u32 val;
+	dma_addr_t ring_map = udev->l2_ring_map;
+
+	/* General data */
+	data->general.client_id = cli;
+	data->general.activate_flg = 1;
+	data->general.sp_client_id = cli;
+	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
+	data->general.func_id = cp->pfid;
+
+	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
+		dma_addr_t buf_map;
+		int n = (i % cp->l2_rx_ring_size) + 1;
+
+		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
+		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
+		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
+	}
+
+	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
+	rxbd->addr_hi = cpu_to_le32(val);
+	data->rx.bd_page_base.hi = cpu_to_le32(val);
+
+	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+	rxbd->addr_lo = cpu_to_le32(val);
+	data->rx.bd_page_base.lo = cpu_to_le32(val);
+
+	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
+	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
+	rxcqe->addr_hi = cpu_to_le32(val);
+	data->rx.cqe_page_base.hi = cpu_to_le32(val);
+
+	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
+	rxcqe->addr_lo = cpu_to_le32(val);
+	data->rx.cqe_page_base.lo = cpu_to_le32(val);
+
+	/* Other ramrod params */
+	data->rx.client_qzone_id = cl_qzone_id;
+	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
+	data->rx.status_block_id = BNX2X_DEF_SB_ID;
+
+	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
+
+	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
+	data->rx.outer_vlan_removal_enable_flg = 1;
+	data->rx.silent_vlan_removal_flg = 1;
+	data->rx.silent_vlan_value = 0;
+	data->rx.silent_vlan_mask = 0xffff;
+
+	cp->rx_cons_ptr =
+		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
+	cp->rx_cons = *cp->rx_cons_ptr;
+}
+
+static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	u32 pfid = cp->pfid;
+
+	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
+			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
+	cp->kcq1.sw_prod_idx = 0;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	} else {
+		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
+
+		cp->kcq1.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
+		cp->kcq1.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
+
+		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
+					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
+		cp->kcq2.sw_prod_idx = 0;
+		cp->kcq2.hw_prod_idx_ptr =
+			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
+		cp->kcq2.status_idx_ptr =
+			&sb->sb.running_index[SM_RX_ID];
+	}
+}
+
+static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int func = CNIC_FUNC(cp), ret;
+	u32 pfid;
+
+	cp->port_mode = CHIP_PORT_MODE_NONE;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
+
+		if (!(val & 1))
+			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
+		else
+			val = (val >> 1) & 1;
+
+		if (val) {
+			cp->port_mode = CHIP_4_PORT_MODE;
+			cp->pfid = func >> 1;
+		} else {
+			cp->port_mode = CHIP_2_PORT_MODE;
+			cp->pfid = func & 0x6;
+		}
+	} else {
+		cp->pfid = func;
+	}
+	pfid = cp->pfid;
+
+	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
+			       cp->iscsi_start_cid, 0);
+
+	if (ret)
+		return -ENOMEM;
+
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
+		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
+					BNX2X_FCOE_NUM_CONNECTIONS,
+					cp->fcoe_start_cid, 0);
+
+		if (ret)
+			return -ENOMEM;
+	}
+
+	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
+
+	cnic_init_bnx2x_kcq(dev);
+
+	/* Only 1 EQ */
+	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
+		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
+		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
+		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
+		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
+	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
+	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
+		HC_INDEX_ISCSI_EQ_CONS);
+
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
+		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
+	CNIC_WR(dev, BAR_USTRORM_INTMEM +
+		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
+		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
+
+	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
+		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
+
+	cnic_setup_bnx2x_context(dev);
+
+	ret = cnic_init_bnx2x_irq(dev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void cnic_init_rings(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+
+	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+		return;
+
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		cnic_init_bnx2_tx_ring(dev);
+		cnic_init_bnx2_rx_ring(dev);
+		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		u32 cli = cp->ethdev->iscsi_l2_client_id;
+		u32 cid = cp->ethdev->iscsi_l2_cid;
+		u32 cl_qzone_id;
+		struct client_init_ramrod_data *data;
+		union l5cm_specific_data l5_data;
+		struct ustorm_eth_rx_producers rx_prods = {0};
+		u32 off, i, *cid_ptr;
+
+		rx_prods.bd_prod = 0;
+		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
+		barrier();
+
+		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
+
+		off = BAR_USTRORM_INTMEM +
+			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
+			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
+			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
+
+		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
+			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
+
+		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
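+		/* Note (added): the ramrod data and the exported cid share
+		 * the uio L2 buffer; the cid lives at a fixed 12-byte
+		 * offset, presumably known to the userspace consumer.
+		 */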
+		data = udev->l2_buf;
+		cid_ptr = udev->l2_buf + 12;
+
+		memset(data, 0, sizeof(*data));
+
+		cnic_init_bnx2x_tx_ring(dev, data);
+		cnic_init_bnx2x_rx_ring(dev, data);
+
+		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
+		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
+
+		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
+			cid, ETH_CONNECTION_TYPE, &l5_data);
+
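+		/* Note (added): wait up to ~10ms for the CLIENT_SETUP
+		 * completion to clear the L2_WAIT flag.
+		 */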
+		i = 0;
+		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+		       ++i < 10)
+			msleep(1);
+
+		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+			netdev_err(dev->netdev,
+				"iSCSI CLIENT_SETUP did not complete\n");
+		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
+		cnic_ring_ctl(dev, cid, cli, 1);
+		*cid_ptr = cid;
+	}
+}
+
+static void cnic_shutdown_rings(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_uio_dev *udev = cp->udev;
+	void *rx_ring;
+
+	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
+		return;
+
+	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+		cnic_shutdown_bnx2_rx_ring(dev);
+	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
+		u32 cli = cp->ethdev->iscsi_l2_client_id;
+		u32 cid = cp->ethdev->iscsi_l2_cid;
+		union l5cm_specific_data l5_data;
+		int i;
+
+		cnic_ring_ctl(dev, cid, cli, 0);
+
+		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
+
+		l5_data.phy_address.lo = cli;
+		l5_data.phy_address.hi = 0;
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
+			cid, ETH_CONNECTION_TYPE, &l5_data);
+		i = 0;
+		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
+		       ++i < 10)
+			msleep(1);
+
+		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
+			netdev_err(dev->netdev,
+				"iSCSI CLIENT_HALT did not complete\n");
+		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
+
+		memset(&l5_data, 0, sizeof(l5_data));
+		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
+			cid, NONE_CONNECTION_TYPE, &l5_data);
+		msleep(10);
+	}
+	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
+	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
+	memset(rx_ring, 0, BCM_PAGE_SIZE);
+}
+
+static int cnic_register_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (!ethdev)
+		return -ENODEV;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+		return 0;
+
+	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+	if (err)
+		netdev_err(dev->netdev, "register_cnic failed\n");
+
+	return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!ethdev)
+		return;
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EALREADY;
+
+	dev->regview = ethdev->io_base;
+	pci_dev_get(dev->pcidev);
+	cp->func = PCI_FUNC(dev->pcidev->devfn);
+	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
+	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+	err = cp->alloc_resc(dev);
+	if (err) {
+		netdev_err(dev->netdev, "allocate resource failure\n");
+		goto err1;
+	}
+
+	err = cp->start_hw(dev);
+	if (err)
+		goto err1;
+
+	err = cnic_cm_open(dev);
+	if (err)
+		goto err1;
+
+	set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+	cp->enable_int(dev);
+
+	return 0;
+
+err1:
+	cp->free_resc(dev);
+	pci_dev_put(dev->pcidev);
+	return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+	cnic_disable_bnx2_int_sync(dev);
+
+	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+	cnic_init_context(dev, KWQ_CID);
+	cnic_init_context(dev, KCQ_CID);
+
+	cnic_setup_5709_context(dev, 0);
+	cnic_free_irq(dev);
+
+	cnic_free_resc(dev);
+}
+
+static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+
+	cnic_free_irq(dev);
+	*cp->kcq1.hw_prod_idx_ptr = 0;
+	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
+		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
+	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
+	cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+		struct cnic_local *cp = dev->cnic_priv;
+		int i = 0;
+
+		/* Need to wait for the ring shutdown event to complete
+		 * before clearing the CNIC_UP flag.
+		 */
+		while (cp->udev->uio_dev != -1 && i < 15) {
+			msleep(100);
+			i++;
+		}
+		cnic_shutdown_rings(dev);
+		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+		rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+		synchronize_rcu();
+		cnic_cm_shutdown(dev);
+		cp->stop_hw(dev);
+		pci_dev_put(dev->pcidev);
+	}
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+	int i = 0;
+
+	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+		msleep(100);
+		i++;
+	}
+	if (atomic_read(&dev->ref_count) != 0)
+		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
+
+	netdev_info(dev->netdev, "Removed CNIC device\n");
+	dev_put(dev->netdev);
+	kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+				       struct pci_dev *pdev)
+{
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	int alloc_size;
+
+	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+	cdev = kzalloc(alloc_size, GFP_KERNEL);
+	if (cdev == NULL) {
+		netdev_err(dev, "allocate dev struct failure\n");
+		return NULL;
+	}
+
+	cdev->netdev = dev;
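+	/* Note (added): cnic_dev and cnic_local come from a single
+	 * allocation; cnic_priv points just past the cnic_dev header.
+	 */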
+	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+	cdev->register_device = cnic_register_device;
+	cdev->unregister_device = cnic_unregister_device;
+	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+	cp = cdev->cnic_priv;
+	cp->dev = cdev;
+	cp->l2_single_buf_size = 0x400;
+	cp->l2_rx_ring_size = 3;
+
+	spin_lock_init(&cp->cnic_ulp_lock);
+
+	netdev_info(dev, "Added CNIC device\n");
+
+	return cdev;
+}
+
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct cnic_eth_dev *ethdev = NULL;
+	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
+
+	probe = symbol_get(bnx2_cnic_probe);
+	if (probe) {
+		ethdev = (*probe)(dev);
+		symbol_put(bnx2_cnic_probe);
+	}
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	pci_dev_get(pdev);
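+	/* Note (added): 5709 revisions below 0x10 are rejected,
+	 * presumably early silicon without CNIC support.
+	 */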
+	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
+	    (pdev->revision < 0x10)) {
+		pci_dev_put(pdev);
+		goto cnic_err;
+	}
+	pci_dev_put(pdev);
+
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL)
+		goto cnic_err;
+
+	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+	cp->chip_id = ethdev->chip_id;
+
+	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+
+	cp->cnic_ops = &cnic_bnx2_ops;
+	cp->start_hw = cnic_start_bnx2_hw;
+	cp->stop_hw = cnic_stop_bnx2_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl;
+	cp->alloc_resc = cnic_alloc_bnx2_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2_hw;
+	cp->enable_int = cnic_enable_bnx2_int;
+	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+	cp->close_conn = cnic_close_bnx2_conn;
+	return cdev;
+
+cnic_err:
+	dev_put(dev);
+	return NULL;
+}
+
+static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
+{
+	struct pci_dev *pdev;
+	struct cnic_dev *cdev;
+	struct cnic_local *cp;
+	struct cnic_eth_dev *ethdev = NULL;
+	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
+
+	probe = symbol_get(bnx2x_cnic_probe);
+	if (probe) {
+		ethdev = (*probe)(dev);
+		symbol_put(bnx2x_cnic_probe);
+	}
+	if (!ethdev)
+		return NULL;
+
+	pdev = ethdev->pdev;
+	if (!pdev)
+		return NULL;
+
+	dev_hold(dev);
+	cdev = cnic_alloc_dev(dev, pdev);
+	if (cdev == NULL) {
+		dev_put(dev);
+		return NULL;
+	}
+
+	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
+	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
+
+	cp = cdev->cnic_priv;
+	cp->ethdev = ethdev;
+	cdev->pcidev = pdev;
+	cp->chip_id = ethdev->chip_id;
+
+	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
+		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
+	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
+		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
+
+	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
+
+	cp->cnic_ops = &cnic_bnx2x_ops;
+	cp->start_hw = cnic_start_bnx2x_hw;
+	cp->stop_hw = cnic_stop_bnx2x_hw;
+	cp->setup_pgtbl = cnic_setup_page_tbl_le;
+	cp->alloc_resc = cnic_alloc_bnx2x_resc;
+	cp->free_resc = cnic_free_resc;
+	cp->start_cm = cnic_cm_init_bnx2x_hw;
+	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
+	cp->enable_int = cnic_enable_bnx2x_int;
+	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
+	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
+		cp->ack_int = cnic_ack_bnx2x_e2_msix;
+	else
+		cp->ack_int = cnic_ack_bnx2x_msix;
+	cp->close_conn = cnic_close_bnx2x_conn;
+	return cdev;
+}
+
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+	struct ethtool_drvinfo drvinfo;
+	struct cnic_dev *cdev = NULL;
+
+	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+		memset(&drvinfo, 0, sizeof(drvinfo));
+		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+		if (!strcmp(drvinfo.driver, "bnx2"))
+			cdev = init_bnx2_cnic(dev);
+		if (!strcmp(drvinfo.driver, "bnx2x"))
+			cdev = init_bnx2x_cnic(dev);
+		if (cdev) {
+			write_lock(&cnic_dev_lock);
+			list_add(&cdev->list, &cnic_dev_list);
+			write_unlock(&cnic_dev_lock);
+		}
+	}
+	return cdev;
+}
+
+static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
+			      u16 vlan_id)
+{
+	int if_type;
+
+	rcu_read_lock();
+	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+		struct cnic_ulp_ops *ulp_ops;
+		void *ctx;
+
+		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+		if (!ulp_ops || !ulp_ops->indicate_netevent)
+			continue;
+
+		ctx = cp->ulp_handle[if_type];
+
+		ulp_ops->indicate_netevent(ctx, event, vlan_id);
+	}
+	rcu_read_unlock();
+}
+
+/* netdev event handler */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+							 void *ptr)
+{
+	struct net_device *netdev = ptr;
+	struct cnic_dev *dev;
+	int new_dev = 0;
+
+	dev = cnic_from_netdev(netdev);
+
+	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
+		/* Check for the hot-plug device */
+		dev = is_cnic_dev(netdev);
+		if (dev) {
+			new_dev = 1;
+			cnic_hold(dev);
+		}
+	}
+	if (dev) {
+		struct cnic_local *cp = dev->cnic_priv;
+
+		if (new_dev)
+			cnic_ulp_init(dev);
+		else if (event == NETDEV_UNREGISTER)
+			cnic_ulp_exit(dev);
+
+		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
+			if (cnic_register_netdev(dev) != 0) {
+				cnic_put(dev);
+				goto done;
+			}
+			if (!cnic_start_hw(dev))
+				cnic_ulp_start(dev);
+		}
+
+		cnic_rcv_netevent(cp, event, 0);
+
+		if (event == NETDEV_GOING_DOWN) {
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+			cnic_unregister_netdev(dev);
+		} else if (event == NETDEV_UNREGISTER) {
+			write_lock(&cnic_dev_lock);
+			list_del_init(&dev->list);
+			write_unlock(&cnic_dev_lock);
+
+			cnic_put(dev);
+			cnic_free_dev(dev);
+			goto done;
+		}
+		cnic_put(dev);
+	} else {
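+		/* Note (added): the event may be for a VLAN device; map it
+		 * to the real device and forward the event along with the
+		 * vlan id.
+		 */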
+		struct net_device *realdev;
+		u16 vid;
+
+		vid = cnic_get_vlan(netdev, &realdev);
+		if (realdev) {
+			dev = cnic_from_netdev(realdev);
+			if (dev) {
+				vid |= VLAN_TAG_PRESENT;
+				cnic_rcv_netevent(dev->cnic_priv, event, vid);
+				cnic_put(dev);
+			}
+		}
+	}
+done:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+	.notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+	struct cnic_dev *dev;
+	struct cnic_uio_dev *udev;
+
+	while (!list_empty(&cnic_dev_list)) {
+		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
+		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+			cnic_ulp_stop(dev);
+			cnic_stop_hw(dev);
+		}
+
+		cnic_ulp_exit(dev);
+		cnic_unregister_netdev(dev);
+		list_del_init(&dev->list);
+		cnic_free_dev(dev);
+	}
+	while (!list_empty(&cnic_udev_list)) {
+		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
+				  list);
+		cnic_free_uio(udev);
+	}
+}
+
+static int __init cnic_init(void)
+{
+	int rc = 0;
+
+	pr_info("%s", version);
+
+	rc = register_netdevice_notifier(&cnic_netdev_notifier);
+	if (rc) {
+		cnic_release();
+		return rc;
+	}
+
+	cnic_wq = create_singlethread_workqueue("cnic_wq");
+	if (!cnic_wq) {
+		cnic_release();
+		unregister_netdevice_notifier(&cnic_netdev_notifier);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+	unregister_netdevice_notifier(&cnic_netdev_notifier);
+	cnic_release();
+	destroy_workqueue(cnic_wq);
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
new file mode 100644
index 0000000..7a2928f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -0,0 +1,478 @@
+/* cnic.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define HC_INDEX_ISCSI_EQ_CONS			6
+
+#define HC_INDEX_FCOE_EQ_CONS			3
+
+#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS		5
+#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS	1
+
+#define KWQ_PAGE_CNT	4
+#define KCQ_PAGE_CNT	16
+
+#define KWQ_CID 		24
+#define KCQ_CID 		25
+
+/*
+ *	krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS	0x00000000
+#define L5_KRNLQ_SIZE	0x00000000
+#define L5_KRNLQ_TYPE	0x00000000
+#define KRNLQ_FLAGS_PG_SZ					(0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256					(0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512					(1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K					(2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K					(3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K					(4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K					(5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K					(6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K					(7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K					(8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K					(9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K					(10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K					(11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M					(12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M					(13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ					(1<<15)
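+/* Note (added): queue element size (0x28 bytes) rounded up to a 32-byte
+ * multiple and expressed in 32-byte units, shifted into the SIZE field.
+ */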
+#define KRNLQ_SIZE_TYPE_SIZE	((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE						(0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY					(0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ					(6<<28)
+
+#define L5_KRNLQ_HOST_QIDX		0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX		0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ 	0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX	0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI 	0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO 	0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX		0x00000018
+#define L5_KRNLQ_NX_PG_QIDX 		0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES		0x0000001c
+#define L5_KRNLQ_QIDX_INCR		0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI 	0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO 	0x00000024
+
+#define BNX2_PG_CTX_MAP			0x1a0034
+#define BNX2_ISCSI_CTX_MAP		0x1a0074
+
+#define MAX_COMPLETED_KCQE	64
+
+#define MAX_CNIC_L5_CONTEXT	256
+
+#define MAX_CM_SK_TBL_SZ	MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ	256
+
+#define CNIC_LOCAL_PORT_MIN	60000
+#define CNIC_LOCAL_PORT_MAX	61024
+#define CNIC_LOCAL_PORT_RANGE	(CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX	((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX	((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
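+/* Note (added): the last element of each KCQ page is a next-page pointer,
+ * so when the index reaches the last usable entry it advances by 2 to
+ * skip over it.
+ */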
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) ==		\
+		(MAX_KCQE_CNT - 1)) ?					\
+		(x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x)						\
+	&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
+
+#define DEF_IPID_START		0x8000
+
+#define DEF_KA_TIMEOUT		10000
+#define DEF_KA_INTERVAL		300000
+#define DEF_KA_MAX_PROBE_COUNT	3
+#define DEF_TOS			0
+#define DEF_TTL			0xfe
+#define DEF_SND_SEQ_SCALE	0
+#define DEF_RCV_BUF		0xffff
+#define DEF_SND_BUF		0xffff
+#define DEF_SEED		0
+#define DEF_MAX_RT_TIME		500
+#define DEF_MAX_DA_COUNT	2
+#define DEF_SWS_TIMER		1000
+#define DEF_MAX_CWND		0xffff
+
+struct cnic_ctx {
+	u32		cid;
+	void		*ctx;
+	dma_addr_t	mapping;
+};
+
+#define BNX2_MAX_CID		0x2000
+
+struct cnic_dma {
+	int		num_pages;
+	void		**pg_arr;
+	dma_addr_t	*pg_map_arr;
+	int		pgtbl_size;
+	u32		*pgtbl;
+	dma_addr_t	pgtbl_map;
+};
+
+struct cnic_id_tbl {
+	spinlock_t	lock;
+	u32		start;
+	u32		max;
+	u32		next;
+	unsigned long	*table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE	128
+
+struct kwqe_16_data {
+	u8	data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+	struct cnic_dma		task_array_info;
+	struct cnic_dma		r2tq_info;
+	struct cnic_dma		hq_info;
+};
+
+struct cnic_context {
+	u32			cid;
+	struct kwqe_16_data	*kwqe_data;
+	dma_addr_t		kwqe_data_mapping;
+	wait_queue_head_t	waitq;
+	int			wait_cond;
+	unsigned long		timestamp;
+	unsigned long		ctx_flags;
+#define	CTX_FL_OFFLD_START	0
+#define	CTX_FL_DELETE_WAIT	1
+#define	CTX_FL_CID_ERROR	2
+	u8			ulp_proto_id;
+	union {
+		struct cnic_iscsi	*iscsi;
+	} proto;
+};
+
+struct kcq_info {
+	struct cnic_dma	dma;
+	struct kcqe	**kcq;
+
+	u16		*hw_prod_idx_ptr;
+	u16		sw_prod_idx;
+	u16		*status_idx_ptr;
+	u32		io_addr;
+
+	u16		(*next_idx)(u16);
+	u16		(*hw_idx)(u16);
+};
+
+struct iro {
+	u32 base;
+	u16 m1;
+	u16 m2;
+	u16 m3;
+	u16 size;
+};
+
+struct cnic_uio_dev {
+	struct uio_info		cnic_uinfo;
+	u32			uio_dev;
+
+	int			l2_ring_size;
+	void			*l2_ring;
+	dma_addr_t		l2_ring_map;
+
+	int			l2_buf_size;
+	void			*l2_buf;
+	dma_addr_t		l2_buf_map;
+
+	struct cnic_dev		*dev;
+	struct pci_dev		*pdev;
+	struct list_head	list;
+};
+
+struct cnic_local {
+
+	spinlock_t cnic_ulp_lock;
+	void *ulp_handle[MAX_CNIC_ULP_TYPE];
+	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT	0
+#define ULP_F_START	1
+#define ULP_F_CALL_PENDING	2
+	struct cnic_ulp_ops __rcu *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+	unsigned long cnic_local_flags;
+#define	CNIC_LCL_FL_KWQ_INIT		0x0
+#define	CNIC_LCL_FL_L2_WAIT		0x1
+#define	CNIC_LCL_FL_RINGS_INITED	0x2
+#define	CNIC_LCL_FL_STOP_ISCSI		0x4
+
+	struct cnic_dev *dev;
+
+	struct cnic_eth_dev *ethdev;
+
+	struct cnic_uio_dev *udev;
+
+	int		l2_rx_ring_size;
+	int		l2_single_buf_size;
+
+	u16		*rx_cons_ptr;
+	u16		*tx_cons_ptr;
+	u16		rx_cons;
+	u16		tx_cons;
+
+	const struct iro	*iro_arr;
+#define IRO (((struct cnic_local *) dev->cnic_priv)->iro_arr)
+
+	struct cnic_dma		kwq_info;
+	struct kwqe		**kwq;
+
+	struct cnic_dma		kwq_16_data_info;
+
+	u16		max_kwq_idx;
+
+	u16		kwq_prod_idx;
+	u32		kwq_io_addr;
+
+	u16		*kwq_con_idx_ptr;
+	u16		kwq_con_idx;
+
+	struct kcq_info	kcq1;
+	struct kcq_info	kcq2;
+
+	union {
+		void				*gen;
+		struct status_block_msix	*bnx2;
+		struct host_hc_status_block_e1x	*bnx2x_e1x;
+		/* index values - which counter to update */
+		#define SM_RX_ID		0
+		#define SM_TX_ID		1
+	} status_blk;
+
+	struct host_sp_status_block	*bnx2x_def_status_blk;
+
+	u32				status_blk_num;
+	u32				bnx2x_igu_sb_id;
+	u32				int_num;
+	u32				last_status_idx;
+	struct tasklet_struct		cnic_irq_task;
+
+	struct kcqe		*completed_kcq[MAX_COMPLETED_KCQE];
+
+	struct cnic_sock	*csk_tbl;
+	struct cnic_id_tbl	csk_port_tbl;
+
+	struct cnic_dma		gbl_buf_info;
+
+	struct cnic_iscsi	*iscsi_tbl;
+	struct cnic_context	*ctx_tbl;
+	struct cnic_id_tbl	cid_tbl;
+	atomic_t		iscsi_conn;
+	u32			iscsi_start_cid;
+
+	u32			fcoe_init_cid;
+	u32			fcoe_start_cid;
+	struct cnic_id_tbl	fcoe_cid_tbl;
+
+	u32			max_cid_space;
+
+	/* per connection parameters */
+	int			num_iscsi_tasks;
+	int			num_ccells;
+	int			task_array_size;
+	int			r2tq_size;
+	int			hq_size;
+	int			num_cqs;
+
+	struct delayed_work	delete_task;
+
+	struct cnic_ctx		*ctx_arr;
+	int			ctx_blks;
+	int			ctx_blk_size;
+	unsigned long		ctx_align;
+	int			cids_per_blk;
+
+	u32			chip_id;
+	int			func;
+	u32			pfid;
+	u8			port_mode;
+#define CHIP_4_PORT_MODE	0
+#define CHIP_2_PORT_MODE	1
+#define CHIP_PORT_MODE_NONE	2
+
+	u32			shmem_base;
+
+	struct cnic_ops		*cnic_ops;
+	int			(*start_hw)(struct cnic_dev *);
+	void			(*stop_hw)(struct cnic_dev *);
+	void			(*setup_pgtbl)(struct cnic_dev *,
+					       struct cnic_dma *);
+	int			(*alloc_resc)(struct cnic_dev *);
+	void			(*free_resc)(struct cnic_dev *);
+	int			(*start_cm)(struct cnic_dev *);
+	void			(*stop_cm)(struct cnic_dev *);
+	void			(*enable_int)(struct cnic_dev *);
+	void			(*disable_int_sync)(struct cnic_dev *);
+	void			(*ack_int)(struct cnic_dev *);
+	void			(*close_conn)(struct cnic_sock *, u32 opcode);
+};
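+
+/*
+ * Editor's note: the function pointers at the tail of cnic_local form a
+ * small per-chip ops table, so the core code stays chip-agnostic and
+ * dispatches through them.  A sketch of the start-up sequence (assumed
+ * ordering, for illustration only):
+ *
+ *	struct cnic_local *cp = dev->cnic_priv;
+ *	int err;
+ *
+ *	err = cp->alloc_resc(dev);
+ *	if (!err)
+ *		err = cp->start_hw(dev);
+ */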
+
+struct bnx2x_bd_chain_next {
+	u32	addr_lo;
+	u32	addr_hi;
+	u8	reserved[8];
+};
+
+#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T	(1)
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN		(ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT		(ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#define CDU_VALID_DATA(_cid, _region, _type)	\
+	(((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
+
+#define CDU_CRC8(_cid, _region, _type)	\
+	(calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
+
+#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type)	\
+	(0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
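+
+/*
+ * Editor's note: the CDU validation byte is derived from the CID, region
+ * and connection type -- CDU_VALID_DATA() packs them into one word,
+ * CDU_CRC8() runs CRC-8 over that (calc_crc8() is assumed to come from
+ * the bnx2x common code), and CDU_RSRVD_VALUE_TYPE_A() keeps the low
+ * seven CRC bits with the top bit forced on.  Hedged usage sketch
+ * (ISCSI_CONNECTION_TYPE is assumed from the bnx2x HSI headers):
+ *
+ *	u8 valid = CDU_RSRVD_VALUE_TYPE_A(hw_cid,
+ *					  CDU_REGION_NUMBER_XCM_AG,
+ *					  ISCSI_CONNECTION_TYPE);
+ */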
+
+#define BNX2X_CONTEXT_MEM_SIZE		1024
+#define BNX2X_FCOE_CID			16
+
+#define BNX2X_ISCSI_START_CID		18
+#define BNX2X_ISCSI_NUM_CONNECTIONS	128
+#define BNX2X_ISCSI_TASK_CONTEXT_SIZE	128
+#define BNX2X_ISCSI_MAX_PENDING_R2TS	4
+#define BNX2X_ISCSI_R2TQE_SIZE		8
+#define BNX2X_ISCSI_HQ_BD_SIZE		64
+#define BNX2X_ISCSI_GLB_BUF_SIZE	64
+#define BNX2X_ISCSI_PBL_NOT_CACHED	0xff
+#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED	0xff
+
+#define BNX2X_FCOE_NUM_CONNECTIONS	128
+
+#define BNX2X_FCOE_L5_CID_BASE		MAX_ISCSI_TBL_SZ
+
+#define BNX2X_CHIP_NUM_57710		0x164e
+#define BNX2X_CHIP_NUM_57711		0x164f
+#define BNX2X_CHIP_NUM_57711E		0x1650
+#define BNX2X_CHIP_NUM_57712		0x1662
+#define BNX2X_CHIP_NUM_57712E		0x1663
+#define BNX2X_CHIP_NUM_57713		0x1651
+#define BNX2X_CHIP_NUM_57713E		0x1652
+#define BNX2X_CHIP_NUM_57800		0x168a
+#define BNX2X_CHIP_NUM_57810		0x168e
+#define BNX2X_CHIP_NUM_57840		0x168d
+
+#define BNX2X_CHIP_NUM(x)		((x) >> 16)
+#define BNX2X_CHIP_IS_57710(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57710)
+#define BNX2X_CHIP_IS_57711(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
+#define BNX2X_CHIP_IS_57711E(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
+#define BNX2X_CHIP_IS_E1H(x)		\
+	(BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
+#define BNX2X_CHIP_IS_57712(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712)
+#define BNX2X_CHIP_IS_57712E(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57712E)
+#define BNX2X_CHIP_IS_57713(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713)
+#define BNX2X_CHIP_IS_57713E(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57713E)
+#define BNX2X_CHIP_IS_57800(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57800)
+#define BNX2X_CHIP_IS_57810(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57810)
+#define BNX2X_CHIP_IS_57840(x)		\
+	(BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57840)
+#define BNX2X_CHIP_IS_E2(x)		\
+	(BNX2X_CHIP_IS_57712(x) || BNX2X_CHIP_IS_57712E(x) || \
+	 BNX2X_CHIP_IS_57713(x) || BNX2X_CHIP_IS_57713E(x))
+#define BNX2X_CHIP_IS_E3(x)			\
+	(BNX2X_CHIP_IS_57800(x) || BNX2X_CHIP_IS_57810(x) || \
+	 BNX2X_CHIP_IS_57840(x))
+#define BNX2X_CHIP_IS_E2_PLUS(x) (BNX2X_CHIP_IS_E2(x) || BNX2X_CHIP_IS_E3(x))
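+
+/*
+ * Editor's note: the upper 16 bits of chip_id carry the PCI device ID,
+ * so the predicates above reduce to a device-ID compare.  Worked example
+ * for a 57711 (device ID 0x164f):
+ *
+ *	BNX2X_CHIP_NUM(0x164f0001)	== 0x164f
+ *	BNX2X_CHIP_IS_57711(0x164f0001)	-> true
+ *	BNX2X_CHIP_IS_E1H(0x164f0001)	-> true (57711 or 57711E)
+ */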
+
+#define IS_E1H_OFFSET			BNX2X_CHIP_IS_E1H(cp->chip_id)
+
+#define BNX2X_RX_DESC_CNT		(BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
+#define BNX2X_MAX_RX_DESC_CNT		(BNX2X_RX_DESC_CNT - 2)
+#define BNX2X_RCQ_DESC_CNT		(BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
+#define BNX2X_MAX_RCQ_DESC_CNT		(BNX2X_RCQ_DESC_CNT - 1)
+
+#define BNX2X_NEXT_RCQE(x) ((((x) & BNX2X_MAX_RCQ_DESC_CNT) ==		\
+		(BNX2X_MAX_RCQ_DESC_CNT - 1)) ?				\
+		((x) + 2) : ((x) + 1))
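+
+/*
+ * Editor's note: each RCQ page holds BNX2X_RCQ_DESC_CNT entries, the
+ * last of which is a "next page" link rather than a real completion, so
+ * BNX2X_NEXT_RCQE() skips one extra slot at the page boundary.  For
+ * example, with BNX2X_MAX_RCQ_DESC_CNT == 127, index 126 advances to
+ * 128; every other index advances by 1.
+ */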
+
+#define BNX2X_DEF_SB_ID			HC_SP_SB_ID
+
+#define BNX2X_SHMEM_MF_BLK_OFFSET	0x7e4
+
+#define BNX2X_SHMEM_ADDR(base, field)	(base + \
+					 offsetof(struct shmem_region, field))
+
+#define BNX2X_SHMEM2_ADDR(base, field)	(base + \
+					 offsetof(struct shmem2_region, field))
+
+#define BNX2X_SHMEM2_HAS(base, field)				\
+		((base) &&					\
+		 (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) >	\
+		  offsetof(struct shmem2_region, field)))
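+
+/*
+ * Editor's note: BNX2X_SHMEM2_HAS() guards against firmware that
+ * predates a given shmem2 field -- the 'size' member of shmem2_region
+ * advertises how much of the region the firmware actually provides, so
+ * a field only "exists" when that size extends past the field's offset.
+ * Hypothetical usage (the field name is illustrative only):
+ *
+ *	if (BNX2X_SHMEM2_HAS(base, some_new_field))
+ *		val = CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, some_new_field));
+ */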
+
+#define BNX2X_MF_CFG_ADDR(base, field)				\
+			((base) + offsetof(struct mf_cfg, field))
+
+#ifndef ETH_MAX_RX_CLIENTS_E2
+#define ETH_MAX_RX_CLIENTS_E2		ETH_MAX_RX_CLIENTS_E1H
+#endif
+
+#define CNIC_PORT(cp)			((cp)->pfid & 1)
+#define CNIC_FUNC(cp)			((cp)->func)
+#define CNIC_PATH(cp)			(!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? \
+					 0 : (CNIC_FUNC(cp) & 1))
+#define CNIC_E1HVN(cp)			((cp)->pfid >> 1)
+
+#define BNX2X_HW_CID(cp, x)		((CNIC_PORT(cp) << 23) | \
+					 (CNIC_E1HVN(cp) << 17) | (x))
+
+#define BNX2X_SW_CID(x)			((x) & 0x1ffff)
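+
+/*
+ * Editor's note: a worked example of the CID encoding above.  The
+ * hardware CID carries the port in bit 23 and the E1H VN starting at
+ * bit 17, leaving the low 17 bits for the software CID that
+ * BNX2X_SW_CID() recovers:
+ *
+ *	port 1, vn 2, sw cid 0x12:
+ *	BNX2X_HW_CID	-> (1 << 23) | (2 << 17) | 0x12 = 0x840012
+ *	BNX2X_SW_CID(0x840012) -> 0x840012 & 0x1ffff = 0x12
+ */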
+
+#define BNX2X_CL_QZONE_ID(cp, cli)					\
+		(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ? cli :		\
+		 cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H))
+
+#ifndef MAX_STAT_COUNTER_ID
+#define MAX_STAT_COUNTER_ID						\
+	(BNX2X_CHIP_IS_E1H((cp)->chip_id) ? MAX_STAT_COUNTER_ID_E1H :	\
+	 ((BNX2X_CHIP_IS_E2_PLUS((cp)->chip_id)) ? MAX_STAT_COUNTER_ID_E2 :\
+	  MAX_STAT_COUNTER_ID_E1))
+#endif
+
+#endif
+
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
new file mode 100644
index 0000000..e47d210
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -0,0 +1,5484 @@
+/* cnic_defs.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH                  (4)
+#define L2_KWQE_OPCODE_VALUE_VM_FREE_RX_QUEUE       (8)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1               (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2               (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3               (52)
+#define L4_KWQE_OPCODE_VALUE_RESET                  (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE                  (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET          (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE			(0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT		(L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE		(L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT		(L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE	(L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD	(L5CM_RAMROD_CMD_ID_BASE + 15)
+
+#define FCOE_KCQE_OPCODE_INIT_FUNC			(0x10)
+#define FCOE_KCQE_OPCODE_DESTROY_FUNC			(0x11)
+#define FCOE_KCQE_OPCODE_STAT_FUNC			(0x12)
+#define FCOE_KCQE_OPCODE_OFFLOAD_CONN			(0x15)
+#define FCOE_KCQE_OPCODE_ENABLE_CONN			(0x16)
+#define FCOE_KCQE_OPCODE_DISABLE_CONN			(0x17)
+#define FCOE_KCQE_OPCODE_DESTROY_CONN			(0x18)
+#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION		(0x20)
+#define FCOE_KCQE_OPCODE_FCOE_ERROR			(0x21)
+
+#define FCOE_RAMROD_CMD_ID_INIT_FUNC		(FCOE_KCQE_OPCODE_INIT_FUNC)
+#define FCOE_RAMROD_CMD_ID_DESTROY_FUNC		(FCOE_KCQE_OPCODE_DESTROY_FUNC)
+#define FCOE_RAMROD_CMD_ID_STAT_FUNC		(FCOE_KCQE_OPCODE_STAT_FUNC)
+#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN		(FCOE_KCQE_OPCODE_OFFLOAD_CONN)
+#define FCOE_RAMROD_CMD_ID_ENABLE_CONN		(FCOE_KCQE_OPCODE_ENABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DISABLE_CONN		(FCOE_KCQE_OPCODE_DISABLE_CONN)
+#define FCOE_RAMROD_CMD_ID_DESTROY_CONN		(FCOE_KCQE_OPCODE_DESTROY_CONN)
+#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN	(0x81)
+
+#define FCOE_KWQE_OPCODE_INIT1                  (0)
+#define FCOE_KWQE_OPCODE_INIT2                  (1)
+#define FCOE_KWQE_OPCODE_INIT3                  (2)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1	(3)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2	(4)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3	(5)
+#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4	(6)
+#define FCOE_KWQE_OPCODE_ENABLE_CONN	(7)
+#define FCOE_KWQE_OPCODE_DISABLE_CONN	(8)
+#define FCOE_KWQE_OPCODE_DESTROY_CONN	(9)
+#define FCOE_KWQE_OPCODE_DESTROY		(10)
+#define FCOE_KWQE_OPCODE_STAT			(11)
+
+#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE	(0x3)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP             (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP             (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE          (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE       (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED         (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED         (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS           (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT           (0x93)
+
+#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL    (0x83)
+#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG      (0x89)
+
+#define L4_KCQE_OPCODE_VALUE_OOO_EVENT_NOTIFICATION (0xa0)
+#define L4_KCQE_OPCODE_VALUE_OOO_FLUSH              (0xa1)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+	u32 cid;
+	u32 pg_cid;
+	u32 conn_id;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u16 status;
+	u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved1;
+	u16 status;
+#endif
+	u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
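+
+/*
+ * Editor's note: the byte- and halfword-sized members above are listed
+ * in opposite order under __BIG_ENDIAN and __LITTLE_ENDIAN (and the
+ * mask/shift #defines duplicated accordingly) so that each 32-bit word
+ * keeps the layout the chip expects regardless of host endianness.
+ * Fields packed into 'flags' are read with the usual mask/shift pair;
+ * an illustrative accessor, not part of the original header:
+ */
+static inline u8 l4_kcq_layer_code(const struct l4_kcq *kcq)
+{
+	return (kcq->flags & L4_KCQ_LAYER_CODE) >> L4_KCQ_LAYER_CODE_SHIFT;
+}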
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+	u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+	u16 pg_status;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u16 pg_status;
+#endif
+	u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+	u8 op_code;
+	u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * A KWQE request to gracefully close the connection
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish a connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+	u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 pg_cid;
+	u32 src_ip;
+	u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+	u16 dst_port;
+	u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 src_port;
+	u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rsrv1[3];
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+	u8 rsrv1[3];
+#endif
+	u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish a
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u8 reserved0;
+	u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rsrv;
+	u8 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 reserved2;
+	u32 src_ip_v6_2;
+	u32 src_ip_v6_3;
+	u32 src_ip_v6_4;
+	u32 dst_ip_v6_2;
+	u32 dst_ip_v6_3;
+	u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish a
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+	u8 snd_seq_scale;
+	u8 ttl;
+	u8 tos;
+	u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_max_probe_count;
+	u8 tos;
+	u8 ttl;
+	u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pmtu;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u16 pmtu;
+#endif
+	u32 rcv_buf;
+	u32 snd_buf;
+	u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 l2hdr_nbytes;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+	u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa0;
+	u8 sa1;
+	u8 sa2;
+	u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sa3;
+	u8 sa2;
+	u8 sa1;
+	u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sa4;
+	u8 sa5;
+	u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+	u16 etype;
+	u8 sa5;
+	u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 vlan_tag;
+	u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+	u16 ipid_start;
+	u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ipid_count;
+	u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved3;
+	u16 ipid_count;
+#endif
+	u32 host_opaque;
+};
+
+
+/*
+ * A KWQE request to abortively close (reset) the connection
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+	u8 op_code;
+	u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved0;
+	u8 op_code;
+	u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 pg_cid;
+	u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+	u8 pg_unused_a;
+	u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pg_ipid_count;
+	u8 pg_unused_a;
+	u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserverd3;
+	u8 da0;
+	u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da1;
+	u8 da0;
+	u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 da2;
+	u8 da3;
+	u8 da4;
+	u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 da5;
+	u8 da4;
+	u8 da3;
+	u8 da2;
+#endif
+	u32 reserved4;
+	u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+	u8 opcode;
+	u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+	u16 oper16;
+	u8 opcode;
+	u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+	u32 cid;
+	u32 reserved2[6];
+};
+
+/*
+ * bnx2x structures
+ */
+
+/*
+ * The iscsi aggregative context of Cstorm
+ */
+struct cstorm_iscsi_ag_context {
+	u32 agg_vars1;
+#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
+#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
+#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<14)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 14
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
+#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<19)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 19
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN (0x1<<20)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX2_CF_EN_SHIFT 20
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<21)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 21
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<22)
+#define __CSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 22
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
+#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
+#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
+#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
+#if defined(__BIG_ENDIAN)
+	u8 __aux1_th;
+	u8 __aux1_val;
+	u16 __agg_vars2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_vars2;
+	u8 __aux1_val;
+	u8 __aux1_th;
+#endif
+	u32 rel_seq;
+	u32 rel_seq_th;
+#if defined(__BIG_ENDIAN)
+	u16 hq_cons;
+	u16 hq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_prod;
+	u16 hq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved62;
+	u8 __reserved61;
+	u8 __reserved60;
+	u8 __reserved59;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __reserved59;
+	u8 __reserved60;
+	u8 __reserved61;
+	u8 __reserved62;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved64;
+	u16 cq_u_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_u_prod;
+	u16 __reserved64;
+#endif
+	u32 __cq_u_prod1;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_vars3;
+	u16 cq_u_pend;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_u_pend;
+	u16 __agg_vars3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __aux2_th;
+	u16 aux2_val;
+#elif defined(__LITTLE_ENDIAN)
+	u16 aux2_val;
+	u16 __aux2_th;
+#endif
+};
+
+/*
+ * The fcoe extra aggregative context section of Tstorm
+ */
+struct tstorm_fcoe_extra_ag_context_section {
+	u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
+#endif
+	u32 __lcq_prod;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 __reserved66;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 __lcq_cons;
+	u32 __reserved2;
+};
+
+/*
+ * The fcoe aggregative context of Tstorm
+ */
+struct tstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
+#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
+#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
+#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+	u16 __agg_val4;
+#endif
+	struct tstorm_fcoe_extra_ag_context_section __extra_section;
+};
+
+
+
+/*
+ * The tcp aggregative context section of Tstorm
+ */
+struct tstorm_tcp_tcp_ag_context_section {
+	u32 __agg_val1;
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars2;
+	u8 __agg_val3;
+	u16 __agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val2;
+	u8 __agg_val3;
+	u8 __tcp_agg_vars2;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val5;
+	u8 __agg_val6;
+	u8 __tcp_agg_vars3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __tcp_agg_vars3;
+	u8 __agg_val6;
+	u16 __agg_val5;
+#endif
+	u32 snd_nxt;
+	u32 rtt_seq;
+	u32 rtt_time;
+	u32 __reserved66;
+	u32 wnd_right_edge;
+	u32 tcp_agg_vars1;
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
+#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
+#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
+	u32 snd_max;
+	u32 snd_una;
+	u32 __reserved2;
+};
+
+/*
+ * The iscsi aggregative context of Tstorm
+ */
+struct tstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 ulp_credit;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG (0x1<<7)
+#define __TSTORM_ISCSI_AG_CONTEXT_ACK_ON_FIN_SENT_FLAG_SHIFT 7
+	u16 ulp_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val4;
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_vars2;
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG (0x1<<0)
+#define __TSTORM_ISCSI_AG_CONTEXT_MSL_TIMER_SET_FLAG_SHIFT 0
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG (0x1<<1)
+#define __TSTORM_ISCSI_AG_CONTEXT_FIN_SENT_FIRST_FLAG_SHIFT 1
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF (0x3<<2)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_SHIFT 2
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF (0x3<<4)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_SHIFT 4
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
+#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<11)
+#define __TSTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 11
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN (0x1<<12)
+#define __TSTORM_ISCSI_AG_CONTEXT_RST_SENT_CF_EN_SHIFT 12
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN (0x1<<13)
+#define __TSTORM_ISCSI_AG_CONTEXT_WAKEUP_CALL_CF_EN_SHIFT 13
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
+#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
+	u16 __agg_val4;
+#endif
+	struct tstorm_tcp_tcp_ag_context_section tcp;
+};
+
+
+
+/*
+ * The fcoe aggregative context of Ustorm
+ */
+struct ustorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u8 __aux_counter_flags;
+	u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 agg_vars2;
+#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 agg_misc2;
+	u16 pbf_tx_seq_ack;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_tx_seq_ack;
+	u8 agg_misc2;
+	u8 cdu_usage;
+#endif
+	u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3_th;
+	u8 agg_val3;
+	u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_misc3;
+	u8 agg_val3;
+	u8 agg_val3_th;
+#endif
+	u32 expired_task_id;
+	u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+	u16 cq_prod;
+	u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_cons;
+	u16 cq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved2;
+	u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+	u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 decision_rule_enable_bits;
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
+#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
+#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 decision_rules;
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
+#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
+#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
+	u16 __reserved2;
+#endif
+};
+
+
+/*
+ * The iscsi aggregative context of Ustorm
+ */
+struct ustorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u8 __aux_counter_flags;
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
+	u8 agg_vars2;
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
+	u8 __aux_counter_flags;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_usage;
+	u8 agg_misc2;
+	u16 __cq_local_comp_itt_val;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __cq_local_comp_itt_val;
+	u8 agg_misc2;
+	u8 cdu_usage;
+#endif
+	u32 agg_misc4;
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3_th;
+	u8 agg_val3;
+	u16 agg_misc3;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_misc3;
+	u8 agg_val3;
+	u8 agg_val3_th;
+#endif
+	u32 agg_val1;
+	u32 agg_misc4_th;
+#if defined(__BIG_ENDIAN)
+	u16 agg_val2_th;
+	u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val2;
+	u16 agg_val2_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __reserved2;
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 decision_rule_enable_bits;
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
+#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
+#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
+#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
+#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
+#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
+#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 decision_rules;
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
+#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
+#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
+#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
+	u16 __reserved2;
+#endif
+};
+
+
+/*
+ * The fcoe aggregative context section of Xstorm
+ */
+struct xstorm_fcoe_extra_ag_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+	u8 __reserved_da_cnt;
+	u16 __mtu;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __mtu;
+	u8 __reserved_da_cnt;
+	u8 tcp_agg_vars1;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
+#endif
+	u32 snd_nxt;
+	u32 tx_wnd;
+	u32 __reserved55;
+	u32 local_adv_wnd;
+#if defined(__BIG_ENDIAN)
+	u8 __agg_val8_th;
+	u8 __tx_dest;
+	u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_agg_vars2;
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+	u8 __tx_dest;
+	u8 __agg_val8_th;
+#endif
+	u32 __sq_base_addr_lo;
+	u32 __sq_base_addr_hi;
+	u32 __xfrq_base_addr_lo;
+	u32 __xfrq_base_addr_hi;
+#if defined(__BIG_ENDIAN)
+	u16 __xfrq_cons;
+	u16 __xfrq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __xfrq_prod;
+	u16 __xfrq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars5;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars3;
+	u8 __reserved_force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __reserved_force_pure_ack_cnt;
+	u8 __tcp_agg_vars3;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars5;
+#endif
+	u32 __tcp_agg_vars6;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc6;
+	u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __tcp_agg_vars7;
+	u16 __agg_misc6;
+#endif
+	u32 __agg_val10;
+	u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved3;
+	u8 __reserved2;
+	u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __da_only_cnt;
+	u8 __reserved2;
+	u16 __reserved3;
+#endif
+};
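+/*
+ * Editorial note: each #if defined(__BIG_ENDIAN) / __LITTLE_ENDIAN pair
+ * above declares the same fields in reversed order within each 32-bit word,
+ * so the sub-word members land in the byte lanes the chip expects no matter
+ * what the host endianness is.
+ */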
+
+/*
+ * The FCoE aggregative context of Xstorm
+ */
+struct xstorm_fcoe_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 agg_val1;
+	u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+	u8 __state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __state;
+	u8 agg_vars1;
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 __agg_vars4;
+	u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+	u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars2;
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
+	u8 __agg_vars4;
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+	u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_cons;
+	u16 agg_vars5;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
+#endif
+	struct xstorm_fcoe_extra_ag_context_section __extra_section;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
+#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
+#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
+#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
+#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val11_th;
+	u16 __agg_val11;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val11;
+	u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 confq_cons;
+	u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 confq_prod;
+	u16 confq_cons;
+#endif
+	u32 agg_vars8;
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 agg_misc0;
+	u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_prod;
+	u16 agg_misc0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 completion_seq;
+	u32 confq_pbl_base_lo;
+	u32 confq_pbl_base_hi;
+};
+
+
+/*
+ * The TCP aggregative context section of Xstorm
+ */
+struct xstorm_tcp_tcp_ag_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
+	u8 __da_cnt;
+	u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mss;
+	u8 __da_cnt;
+	u8 tcp_agg_vars1;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF (0x3<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_DA_TIMER_CF_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF (0x3<<4)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CLEAR_DA_TIMER_EN_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG (0x1<<7)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_EXPIRATION_FLAG_SHIFT 7
+#endif
+	u32 snd_nxt;
+	u32 tx_wnd;
+	u32 snd_una;
+	u32 local_adv_wnd;
+#if defined(__BIG_ENDIAN)
+	u8 __agg_val8_th;
+	u8 __tx_dest;
+	u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_agg_vars2;
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN (0x1<<7)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SIDEBAND_SENT_CF_EN_SHIFT 7
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_SHIFT 14
+	u8 __tx_dest;
+	u8 __agg_val8_th;
+#endif
+	u32 ack_to_far_end;
+	u32 rto_timer;
+	u32 ka_timer;
+	u32 ts_to_echo;
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val7_th;
+	u16 __agg_val7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val7;
+	u16 __agg_val7_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __tcp_agg_vars5;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars3;
+	u8 __force_pure_ack_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __force_pure_ack_cnt;
+	u8 __tcp_agg_vars3;
+	u8 __tcp_agg_vars4;
+	u8 __tcp_agg_vars5;
+#endif
+	u32 tcp_agg_vars6;
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN (0x1<<1)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_DEST_UPDATED_CF_EN_SHIFT 1
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
+#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
+#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc6;
+	u16 __tcp_agg_vars7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __tcp_agg_vars7;
+	u16 __agg_misc6;
+#endif
+	u32 __agg_val10;
+	u32 __agg_val10_th;
+#if defined(__BIG_ENDIAN)
+	u16 __reserved3;
+	u8 __reserved2;
+	u8 __da_only_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 __da_only_cnt;
+	u8 __reserved2;
+	u16 __reserved3;
+#endif
+};
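+/*
+ * Editorial note: this shared TCP section is embedded verbatim as the "tcp"
+ * member of both xstorm_iscsi_ag_context and xstorm_l5cm_ag_context below,
+ * so the TCP offload state is laid out identically for both protocols.
+ */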
+
+/*
+ * The iSCSI aggregative context of Xstorm
+ */
+struct xstorm_iscsi_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 agg_val1;
+	u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 __agg_vars4;
+	u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars2;
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
+#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 __agg_vars4;
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+	u16 sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_cons;
+	u16 agg_vars5;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+	struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_ISCSI_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_ISCSI_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val11_th;
+	u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __gen_data;
+	u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 hq_prod;
+	u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_cons;
+	u16 hq_prod;
+#endif
+	u32 agg_vars8;
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 r2tq_prod;
+	u16 sq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sq_prod;
+	u16 r2tq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 hq_cons_tcp_seq;
+	u32 exp_stat_sn;
+	u32 rst_seq_num;
+};
+
+
+/*
+ * The L5CM aggregative context of Xstorm
+ */
+struct xstorm_l5cm_ag_context {
+#if defined(__BIG_ENDIAN)
+	u16 agg_val1;
+	u8 agg_vars1;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u8 state;
+#elif defined(__LITTLE_ENDIAN)
+	u8 state;
+	u8 agg_vars1;
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
+#define XSTORM_L5CM_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN (0x1<<5)
+#define XSTORM_L5CM_AG_CONTEXT_NAGLE_EN_SHIFT 5
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
+#define __XSTORM_L5CM_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
+	u16 agg_val1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 cdu_reserved;
+	u8 __agg_vars4;
+	u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars2;
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
+#define __XSTORM_L5CM_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX8_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG (0x1<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX9_FLAG_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE1_SHIFT 5
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN (0x1<<7)
+#define XSTORM_L5CM_AG_CONTEXT_AUX4_CF_EN_SHIFT 7
+	u8 agg_vars3;
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF (0x3<<6)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_TS_EN_CF_SHIFT 6
+	u8 __agg_vars4;
+	u8 cdu_reserved;
+#endif
+	u32 more_to_send;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+	u16 agg_val4_th;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val4_th;
+	u16 agg_vars5;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE5_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
+#define XSTORM_L5CM_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE2_SHIFT 14
+#endif
+	struct xstorm_tcp_tcp_ag_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+	u8 agg_val3_th;
+	u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_vars6;
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE6_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE7_SHIFT 3
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE4_SHIFT 6
+	u8 agg_val3_th;
+	u16 agg_vars7;
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
+#define __XSTORM_L5CM_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG (0x1<<3)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX13_FLAG_SHIFT 3
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF (0x3<<4)
+#define __XSTORM_L5CM_AG_CONTEXT_STORMS_SYNC_CF_SHIFT 4
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
+#define XSTORM_L5CM_AG_CONTEXT_DECISION_RULE3_SHIFT 6
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF (0x3<<8)
+#define XSTORM_L5CM_AG_CONTEXT_AUX1_CF_SHIFT 8
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
+#define __XSTORM_L5CM_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG (0x1<<12)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX10_FLAG_SHIFT 12
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG (0x1<<13)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX11_FLAG_SHIFT 13
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG (0x1<<14)
+#define __XSTORM_L5CM_AG_CONTEXT_AUX12_FLAG_SHIFT 14
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN (0x1<<15)
+#define __XSTORM_L5CM_AG_CONTEXT_RX_WND_SCL_EN_SHIFT 15
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_val11_th;
+	u16 __gen_data;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __gen_data;
+	u16 __agg_val11_th;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __reserved1;
+	u8 __agg_val6_th;
+	u16 __agg_val9;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __agg_val9;
+	u8 __agg_val6_th;
+	u8 __reserved1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 agg_val2_th;
+	u16 agg_val2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val2;
+	u16 agg_val2_th;
+#endif
+	u32 agg_vars8;
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC2_SHIFT 0
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
+#define XSTORM_L5CM_AG_CONTEXT_AGG_MISC3_SHIFT 24
+#if defined(__BIG_ENDIAN)
+	u16 agg_misc0;
+	u16 agg_val4;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_val4;
+	u16 agg_misc0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 agg_val3;
+	u8 agg_val6;
+	u8 agg_val5_th;
+	u8 agg_val5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 agg_val5;
+	u8 agg_val5_th;
+	u8 agg_val6;
+	u8 agg_val3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 __agg_misc1;
+	u16 agg_limit1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 agg_limit1;
+	u16 __agg_misc1;
+#endif
+	u32 completion_seq;
+	u32 agg_misc4;
+	u32 rst_seq_num;
+};
+
+/*
+ * ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_info {
+	__le16 aborted_task_id;
+	__le16 reserved0;
+	__le32 reserved1;
+};
+
+
+/*
+ * Fixed-size structure so that it can be placed inside a union
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_abts_rsp_union {
+	u8 r_ctl;
+	u8 rsrv[3];
+	__le32 abts_rsp_payload[7];
+};
+
+
+/*
+ * 4 regs size $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_bd_ctx {
+	__le32 buf_addr_hi;
+	__le32 buf_addr_lo;
+	__le16 buf_len;
+	__le16 rsrv0;
+	__le16 flags;
+	__le16 rsrv1;
+};
+
+
+/*
+ * FCoE cached sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cached_sge_ctx {
+	struct regpair cur_buf_addr;
+	__le16 cur_buf_rem;
+	__le16 second_buf_rem;
+	struct regpair second_buf_addr;
+};
+
+
+/*
+ * Cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_cleanup_info {
+	__le16 cleaned_task_id;
+	__le16 rolled_tx_seq_cnt;
+	__le32 rolled_tx_data_offset;
+};
+
+
+/*
+ * FCP RSP flags $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_flags {
+	u8 flags;
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
+#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
+#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
+#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
+#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
+#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
+};
+
+/*
+ * FCP RSP payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_payload {
+	struct regpair reserved0;
+	__le32 fcp_resid;
+	u8 scsi_status_code;
+	struct fcoe_fcp_rsp_flags fcp_flags;
+	__le16 retry_delay_timer;
+	__le32 fcp_rsp_len;
+	__le32 fcp_sns_len;
+};
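+/*
+ * Editorial sketch: the optional RSP fields are only meaningful when the
+ * matching flag bit is set.  Hypothetical usage ("rsp" and "resid" are not
+ * defined in this header); compiled out:
+ */
+#if 0	/* illustration only */
+	if (rsp->fcp_flags.flags & FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER)
+		resid = le32_to_cpu(rsp->fcp_resid);	/* underrun residual */
+#endif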
+
+/*
+ * Fixed-size structure so that it can be placed inside a union
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_rsp_union {
+	struct fcoe_fcp_rsp_payload payload;
+	struct regpair reserved0;
+};
+
+/*
+ * FC header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_hdr {
+	u8 s_id[3];
+	u8 cs_ctl;
+	u8 d_id[3];
+	u8 r_ctl;
+	__le16 seq_cnt;
+	u8 df_ctl;
+	u8 seq_id;
+	u8 f_ctl[3];
+	u8 type;
+	__le32 parameters;
+	__le16 rx_id;
+	__le16 ox_id;
+};
+
+/*
+ * FC header union $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mp_rsp_union {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 mp_payload_len;
+	__le32 rsrv;
+};
+
+/*
+ * Completion information $$KEEP_ENDIANNESS$$
+ */
+union fcoe_comp_flow_info {
+	struct fcoe_fcp_rsp_union fcp_rsp;
+	struct fcoe_abts_rsp_union abts_rsp;
+	struct fcoe_mp_rsp_union mp_rsp;
+	__le32 opaque[8];
+};
+
+
+/*
+ * External ABTS info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_abts_info {
+	__le32 rsrv0[6];
+	struct fcoe_abts_info ctx;
+};
+
+
+/*
+ * External cleanup info $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_cleanup_info {
+	__le32 rsrv0[6];
+	struct fcoe_cleanup_info ctx;
+};
+
+
+/*
+ * FCoE FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fw_tx_seq_ctx {
+	__le32 data_offset;
+	__le16 seq_cnt;
+	__le16 rsrv0;
+};
+
+/*
+ * FCoE external FW Tx sequence context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_fw_tx_seq_ctx {
+	__le32 rsrv0[6];
+	struct fcoe_fw_tx_seq_ctx ctx;
+};
+
+
+/*
+ * FCoE multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_mul_sges_ctx {
+	struct regpair cur_sge_addr;
+	__le16 cur_sge_off;
+	u8 cur_sge_idx;
+	u8 sgl_size;
+};
+
+/*
+ * FCoE external multiple sges context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_ext_mul_sges_ctx {
+	struct fcoe_mul_sges_ctx mul_sgl;
+	struct regpair rsrv0;
+};
+
+
+/*
+ * FCP CMD payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_cmd_payload {
+	__le32 opaque[8];
+};
+
+
+/*
+ * FCP XFER_RDY payload $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fcp_xfr_rdy_payload {
+	__le32 burst_len;
+	__le32 data_ro;
+};
+
+
+/*
+ * FC frame $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_fc_frame {
+	struct fcoe_fc_hdr fc_hdr;
+	__le32 reserved0[2];
+};
+
+
+/*
+ * FCoE KCQ CQE parameters $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kcqe_params {
+	__le32 reserved0[4];
+};
+
+/*
+ * FCoE KCQ CQE $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kcqe {
+	__le32 fcoe_conn_id;
+	__le32 completion_status;
+	__le32 fcoe_conn_context_id;
+	union fcoe_kcqe_params params;
+	__le16 qe_self_seq;
+	u8 op_code;
+	u8 flags;
+#define FCOE_KCQE_RESERVED0 (0x7<<0)
+#define FCOE_KCQE_RESERVED0_SHIFT 0
+#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
+#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
+#define FCOE_KCQE_LAYER_CODE (0x7<<4)
+#define FCOE_KCQE_LAYER_CODE_SHIFT 4
+#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
+#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
+};
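+/*
+ * Editorial sketch: the layer code is recovered from the packed flags byte
+ * with the masks above.  Hypothetical usage ("kcqe" and "layer" are not
+ * defined in this header); compiled out:
+ */
+#if 0	/* illustration only */
+	layer = (kcqe->flags & FCOE_KCQE_LAYER_CODE) >>
+		FCOE_KCQE_LAYER_CODE_SHIFT;
+#endif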
+
+
+/*
+ * FCoE KWQE header $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_header {
+	u8 op_code;
+	u8 flags;
+#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
+#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init1 {
+	__le16 num_tasks;
+	struct fcoe_kwqe_header hdr;
+	__le32 task_list_pbl_addr_lo;
+	__le32 task_list_pbl_addr_hi;
+	__le32 dummy_buffer_addr_lo;
+	__le32 dummy_buffer_addr_hi;
+	__le16 sq_num_wqes;
+	__le16 rq_num_wqes;
+	__le16 rq_buffer_log_size;
+	__le16 cq_num_wqes;
+	__le16 mtu;
+	u8 num_sessions_log;
+	u8 flags;
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
+#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
+#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
+#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
+#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
+};
+
+/*
+ * FCoE firmware init request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init2 {
+	u8 hsi_major_version;
+	u8 hsi_minor_version;
+	struct fcoe_kwqe_header hdr;
+	__le32 hash_tbl_pbl_addr_lo;
+	__le32 hash_tbl_pbl_addr_hi;
+	__le32 t2_hash_tbl_addr_lo;
+	__le32 t2_hash_tbl_addr_hi;
+	__le32 t2_ptr_hash_tbl_addr_lo;
+	__le32 t2_ptr_hash_tbl_addr_hi;
+	__le32 free_list_count;
+};
+
+/*
+ * FCoE firmware init request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_init3 {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 error_bit_map_lo;
+	__le32 error_bit_map_hi;
+	u8 perf_config;
+	u8 reserved21[3];
+	__le32 reserved2[4];
+};
+
+/*
+ * FCoE connection offload request 1 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload1 {
+	__le16 fcoe_conn_id;
+	struct fcoe_kwqe_header hdr;
+	__le32 sq_addr_lo;
+	__le32 sq_addr_hi;
+	__le32 rq_pbl_addr_lo;
+	__le32 rq_pbl_addr_hi;
+	__le32 rq_first_pbe_addr_lo;
+	__le32 rq_first_pbe_addr_hi;
+	__le16 rq_prod;
+	__le16 reserved0;
+};
+
+/*
+ * FCoE connection offload request 2 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload2 {
+	__le16 tx_max_fc_pay_len;
+	struct fcoe_kwqe_header hdr;
+	__le32 cq_addr_lo;
+	__le32 cq_addr_hi;
+	__le32 xferq_addr_lo;
+	__le32 xferq_addr_hi;
+	__le32 conn_db_addr_lo;
+	__le32 conn_db_addr_hi;
+	__le32 reserved1;
+};
+
+/*
+ * FCoE connection offload request 3 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload3 {
+	__le16 vlan_tag;
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
+	struct fcoe_kwqe_header hdr;
+	u8 s_id[3];
+	u8 tx_max_conc_seqs_c3;
+	u8 d_id[3];
+	u8 flags;
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
+#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
+	__le32 reserved;
+	__le32 confq_first_pbe_addr_lo;
+	__le32 confq_first_pbe_addr_hi;
+	__le16 tx_total_conc_seqs;
+	__le16 rx_max_fc_pay_len;
+	__le16 rx_total_conc_seqs;
+	u8 rx_max_conc_seqs_c3;
+	u8 rx_open_seqs_exch_c3;
+};
+
+/*
+ * FCoE connection offload request 4 $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_offload4 {
+	u8 e_d_tov_timer_val;
+	u8 reserved2;
+	struct fcoe_kwqe_header hdr;
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
+	u8 dst_mac_addr_hi[2];
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	__le32 lcq_addr_lo;
+	__le32 lcq_addr_hi;
+	__le32 confq_pbl_base_addr_lo;
+	__le32 confq_pbl_base_addr_hi;
+};
+
+/*
+ * FCoE connection enable request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_enable_disable {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	u8 src_mac_addr_lo[2];
+	u8 src_mac_addr_mid[2];
+	u8 src_mac_addr_hi[2];
+	u16 vlan_tag;
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
+#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
+	u8 dst_mac_addr_lo[2];
+	u8 dst_mac_addr_mid[2];
+	u8 dst_mac_addr_hi[2];
+	__le16 reserved1;
+	u8 s_id[3];
+	u8 vlan_flag;
+	u8 d_id[3];
+	u8 reserved3;
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved4;
+};
+
+/*
+ * FCoE connection destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_conn_destroy {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 context_id;
+	__le32 conn_id;
+	__le32 reserved1[5];
+};
+
+/*
+ * FCoE destroy request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_destroy {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 reserved1[7];
+};
+
+/*
+ * FCoE statistics request $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_kwqe_stat {
+	__le16 reserved0;
+	struct fcoe_kwqe_header hdr;
+	__le32 stat_params_addr_lo;
+	__le32 stat_params_addr_hi;
+	__le32 reserved1[5];
+};
+
+/*
+ * FCoE KWQ WQE $$KEEP_ENDIANNESS$$
+ */
+union fcoe_kwqe {
+	struct fcoe_kwqe_init1 init1;
+	struct fcoe_kwqe_init2 init2;
+	struct fcoe_kwqe_init3 init3;
+	struct fcoe_kwqe_conn_offload1 conn_offload1;
+	struct fcoe_kwqe_conn_offload2 conn_offload2;
+	struct fcoe_kwqe_conn_offload3 conn_offload3;
+	struct fcoe_kwqe_conn_offload4 conn_offload4;
+	struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
+	struct fcoe_kwqe_conn_destroy conn_destroy;
+	struct fcoe_kwqe_destroy destroy;
+	struct fcoe_kwqe_stat statistics;
+};
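+/*
+ * Editorial note: the union gives every request variant the same footprint,
+ * so any of them can be posted through the same work-queue slot; the
+ * op_code in the embedded fcoe_kwqe_header selects the interpretation.
+ */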
+
+
+/*
+ * TX SGL context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_sgl_union_ctx {
+	struct fcoe_cached_sge_ctx cached_sge;
+	struct fcoe_ext_mul_sges_ctx sgl;
+	__le32 opaque[5];
+};
+
+/*
+ * Data-In/ELS/BLS information $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_read_flow_info {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0[3];
+};
+
+
+/*
+ * FCoE stat context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_s_stat_ctx {
+	u8 flags;
+#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
+#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
+#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
+#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
+#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
+#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
+#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
+#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
+#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
+#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
+#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
+};
+
+/*
+ * FCoE rx seq context $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_rx_seq_ctx {
+	u8 seq_id;
+	struct fcoe_s_stat_ctx s_stat;
+	__le16 seq_cnt;
+	__le32 low_exp_ro;
+	__le32 high_exp_ro;
+};
+
+
+/*
+ * FCoE rx_wr union context $$KEEP_ENDIANNESS$$
+ */
+union fcoe_rx_wr_union_ctx {
+	struct fcoe_read_flow_info read_info;
+	union fcoe_comp_flow_info comp_info;
+	__le32 opaque[8];
+};
+
+
+/*
+ * FCoE SQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_sqe {
+	__le16 wqe;
+#define FCOE_SQE_TASK_ID (0x7FFF<<0)
+#define FCOE_SQE_TASK_ID_SHIFT 0
+#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
+};
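+/*
+ * Editorial sketch: an SQ element packs a task id and a producer toggle bit
+ * into one little-endian 16-bit word.  Hypothetical usage ("sqe", "task_id"
+ * and "toggle" are not defined in this header); compiled out:
+ */
+#if 0	/* illustration only */
+	sqe->wqe = cpu_to_le16((task_id & FCOE_SQE_TASK_ID) |
+			       ((toggle << FCOE_SQE_TOGGLE_BIT_SHIFT) &
+				FCOE_SQE_TOGGLE_BIT));
+#endif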
+
+
+/*
+ * 14 regs $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_only {
+	union fcoe_sgl_union_ctx sgl_ctx;
+	__le32 rsrv0;
+};
+
+/*
+ * 32 bytes (8 regs) used for TX only purposes $$KEEP_ENDIANNESS$$
+ */
+union fcoe_tx_wr_rx_rd_union_ctx {
+	struct fcoe_fc_frame tx_frame;
+	struct fcoe_fcp_cmd_payload fcp_cmd;
+	struct fcoe_ext_cleanup_info cleanup;
+	struct fcoe_ext_abts_info abts;
+	struct fcoe_ext_fw_tx_seq_ctx tx_seq;
+	__le32 opaque[8];
+};
+
+/*
+ * tce_tx_wr_rx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd_const {
+	u8 init_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE (0x7<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE (0x1<<3)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT 3
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE (0x1<<4)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT 4
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE (0x3<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_SUPPORT_REC_TOV_SHIFT 7
+	u8 tx_flags;
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID (0x1<<0)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_VALID_SHIFT 0
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE (0xF<<1)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT 1
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1 (0x1<<5)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV1_SHIFT 5
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT (0x1<<6)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_TX_SEQ_INIT_SHIFT 6
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2 (0x1<<7)
+#define FCOE_TCE_TX_WR_RX_RD_CONST_RSRV2_SHIFT 7
+	__le16 rsrv3;
+	__le32 verify_tx_seq;
+};
+
+/*
+ * tce_tx_wr_rx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_tx_wr_rx_rd {
+	union fcoe_tx_wr_rx_rd_union_ctx union_ctx;
+	struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+/*
+ * tce_rx_wr_tx_rd_const $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_const {
+	__le32 data_2_trns;
+	__le32 init_flags;
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID (0xFFFFFF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0 (0xFF<<24)
+#define FCOE_TCE_RX_WR_TX_RD_CONST_RSRV0_SHIFT 24
+};
+
+/*
+ * tce_rx_wr_tx_rd_var $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd_var {
+	__le16 rx_flags;
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1 (0xF<<0)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV1_SHIFT 0
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE (0x7<<4)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT 4
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ (0x1<<7)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_CONF_REQ_SHIFT 7
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE (0xF<<8)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT 8
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME (0x1<<12)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT 12
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT (0x1<<13)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_SEQ_INIT_SHIFT 13
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2 (0x1<<14)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RSRV2_SHIFT 14
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID (0x1<<15)
+#define FCOE_TCE_RX_WR_TX_RD_VAR_RX_VALID_SHIFT 15
+	__le16 rx_id;
+	struct fcoe_fcp_xfr_rdy_payload fcp_xfr_rdy;
+};
+
+/*
+ * tce_rx_wr_tx_rd $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_wr_tx_rd {
+	struct fcoe_tce_rx_wr_tx_rd_const const_ctx;
+	struct fcoe_tce_rx_wr_tx_rd_var var_ctx;
+};
+
+/*
+ * tce_rx_only $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_tce_rx_only {
+	struct fcoe_rx_seq_ctx rx_seq_ctx;
+	union fcoe_rx_wr_union_ctx union_ctx;
+};
+
+/*
+ * task_ctx_entry $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_task_ctx_entry {
+	struct fcoe_tce_tx_only txwr_only;
+	struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+	struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+	struct fcoe_tce_rx_only rxwr_only;
+};
+
+
+/*
+ * FCoE XFRQ element $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_xfrqe {
+	__le16 wqe;
+#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
+#define FCOE_XFRQE_TASK_ID_SHIFT 0
+#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
+#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
+};
+
+
+/*
+ * Cached SGEs $$KEEP_ENDIANNESS$$
+ */
+struct common_fcoe_sgl {
+	struct fcoe_bd_ctx sge[3];
+};
+
+
+/*
+ * FCoE SQ/XFRQ element
+ */
+struct fcoe_cached_wqe {
+	struct fcoe_sqe sqe;
+	struct fcoe_xfrqe xfrqe;
+};
+
+
+/*
+ * FCoE connection enable/disable params passed by the driver to FW in the
+ * FCoE enable/disable ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_enable_disable_ramrod_params {
+	struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
+};
+
+
+/*
+ * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_conn_offload_ramrod_params {
+	struct fcoe_kwqe_conn_offload1 offload_kwqe1;
+	struct fcoe_kwqe_conn_offload2 offload_kwqe2;
+	struct fcoe_kwqe_conn_offload3 offload_kwqe3;
+	struct fcoe_kwqe_conn_offload4 offload_kwqe4;
+};
+
+
+struct ustorm_fcoe_mng_ctx {
+#if defined(__BIG_ENDIAN)
+	u8 mid_seq_proc_flag;
+	u8 tce_in_cam_flag;
+	u8 tce_on_ior_flag;
+	u8 en_cached_tce_flag;
+#elif defined(__LITTLE_ENDIAN)
+	u8 en_cached_tce_flag;
+	u8 tce_on_ior_flag;
+	u8 tce_in_cam_flag;
+	u8 mid_seq_proc_flag;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 tce_cam_addr;
+	u8 cached_conn_flag;
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 cached_conn_flag;
+	u8 tce_cam_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 dma_tce_ram_addr;
+	u16 tce_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tce_ram_addr;
+	u16 dma_tce_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 ox_id;
+	u16 wr_done_seq;
+#elif defined(__LITTLE_ENDIAN)
+	u16 wr_done_seq;
+	u16 ox_id;
+#endif
+	struct regpair task_addr;
+};
+
+/*
+ * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and
+ * used in FCoE context section
+ */
+struct ustorm_fcoe_params {
+#if defined(__BIG_ENDIAN)
+	u16 fcoe_conn_id;
+	u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u16 flags;
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
+#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
+#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
+#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
+#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
+#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
+#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
+#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
+#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
+#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
+#define USTORM_FCOE_PARAMS_RSRV0 (0x1FF<<7)
+#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 7
+	u16 fcoe_conn_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 hc_csdm_byte_en;
+	u8 func_id;
+	u8 port_id;
+	u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 vnic_id;
+	u8 port_id;
+	u8 func_id;
+	u8 hc_csdm_byte_en;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 rx_total_conc_seqs;
+	u16 rx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_max_fc_pay_len;
+	u16 rx_total_conc_seqs;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 task_pbe_idx_off;
+	u8 task_in_page_log_size;
+	u16 rx_max_conc_seqs;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_max_conc_seqs;
+	u8 task_in_page_log_size;
+	u8 task_pbe_idx_off;
+#endif
+};
+
+/*
+ * FCoE 16-bit index structure
+ */
+struct fcoe_idx16_fields {
+	u16 fields;
+#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
+#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
+#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
+#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
+};
+
+/*
+ * FCoE 16-bit index union
+ */
+union fcoe_idx16_field_union {
+	struct fcoe_idx16_fields fields;
+	u16 val;
+};
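+
+/*
+ * Usage sketch (illustrative only; the helper name is hypothetical): the
+ * union lets the same 16-bit value be handled either as a raw index or
+ * as its idx/msb components, e.g. when reading back a ring producer:
+ */
+static inline u16 fcoe_idx16_get_idx(u16 raw)
+{
+	union fcoe_idx16_field_union u;
+
+	u.val = raw;
+	return (u.fields.fields & FCOE_IDX16_FIELDS_IDX) >>
+	       FCOE_IDX16_FIELDS_IDX_SHIFT;
+}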
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place_mng {
+#if defined(__BIG_ENDIAN)
+	u16 sge_off;
+	u8 num_sges;
+	u8 sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sge_idx;
+	u8 num_sges;
+	u16 sge_off;
+#endif
+};
+
+/*
+ * Parameters required for placement according to SGL
+ */
+struct ustorm_fcoe_data_place {
+	struct ustorm_fcoe_data_place_mng cached_mng;
+	struct fcoe_bd_ctx cached_sge[2];
+};
+
+/*
+ * TX processing shall write to, and RX processing shall read from, this section
+ */
+union fcoe_u_tce_tx_wr_rx_rd_union {
+	struct fcoe_abts_info abts;
+	struct fcoe_cleanup_info cleanup;
+	struct fcoe_fw_tx_seq_ctx tx_seq_ctx;
+	u32 opaque[2];
+};
+
+/*
+ * TX processing shall write to, and RX processing shall read from, this section
+ */
+struct fcoe_u_tce_tx_wr_rx_rd {
+	union fcoe_u_tce_tx_wr_rx_rd_union union_ctx;
+	struct fcoe_tce_tx_wr_rx_rd_const const_ctx;
+};
+
+struct ustorm_fcoe_tce {
+	struct fcoe_u_tce_tx_wr_rx_rd txwr_rxrd;
+	struct fcoe_tce_rx_wr_tx_rd rxwr_txrd;
+	struct fcoe_tce_rx_only rxwr;
+};
+
+struct ustorm_fcoe_cache_ctx {
+	u32 rsrv0;
+	struct ustorm_fcoe_data_place data_place;
+	struct ustorm_fcoe_tce tce;
+};
+
+/*
+ * Ustorm FCoE Storm Context
+ */
+struct ustorm_fcoe_st_context {
+	struct ustorm_fcoe_mng_ctx mng_ctx;
+	struct ustorm_fcoe_params fcoe_params;
+	struct regpair cq_base_addr;
+	struct regpair rq_pbl_base;
+	struct regpair rq_cur_page_addr;
+	struct regpair confq_pbl_base_addr;
+	struct regpair conn_db_base;
+	struct regpair xfrq_base_addr;
+	struct regpair lcq_base_addr;
+#if defined(__BIG_ENDIAN)
+	union fcoe_idx16_field_union rq_cons;
+	union fcoe_idx16_field_union rq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	union fcoe_idx16_field_union rq_prod;
+	union fcoe_idx16_field_union rq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 xfrq_prod;
+	u16 cq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 cq_cons;
+	u16 xfrq_prod;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 lcq_cons;
+	u16 hc_cram_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hc_cram_address;
+	u16 lcq_cons;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_xfrq_lcq_confq_size;
+	u16 confq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 confq_prod;
+	u16 sq_xfrq_lcq_confq_size;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 hc_csdm_agg_int;
+	u8 rsrv2;
+	u8 available_rqes;
+	u8 sp_q_flush_cnt;
+#elif defined(__LITTLE_ENDIAN)
+	u8 sp_q_flush_cnt;
+	u8 available_rqes;
+	u8 rsrv2;
+	u8 hc_csdm_agg_int;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 num_pend_tasks;
+	u16 pbf_ack_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_ack_ram_addr;
+	u16 num_pend_tasks;
+#endif
+	struct ustorm_fcoe_cache_ctx cache_ctx;
+};
+
+/*
+ * The FCoE non-aggregative context of Tstorm
+ */
+struct tstorm_fcoe_st_context {
+	struct regpair reserved0;
+	struct regpair reserved1;
+};
+
+/*
+ * Ethernet context section
+ */
+struct xstorm_fcoe_eth_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u8 local_addr_0;
+	u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_1;
+	u8 local_addr_0;
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved_vlan_type;
+	u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+	u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 local_addr_2;
+	u8 local_addr_3;
+	u8 local_addr_4;
+	u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_5;
+	u8 local_addr_4;
+	u8 local_addr_3;
+	u8 local_addr_2;
+#endif
+};
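+
+/*
+ * Layout note with an illustrative helper (the helper is an editorial
+ * sketch, not part of the interface): the mirrored __BIG_ENDIAN/
+ * __LITTLE_ENDIAN field order above compensates, as we read it, for the
+ * per-32-bit-word byte swap applied to context memory on big-endian
+ * hosts, so each named byte lands where the little-endian chip expects
+ * it. Per-byte assignment then works identically on either build:
+ */
+static inline void
+xstorm_fcoe_eth_set_local_mac(struct xstorm_fcoe_eth_context_section *eth,
+			      const u8 *mac)
+{
+	eth->local_addr_0 = mac[0];
+	eth->local_addr_1 = mac[1];
+	eth->local_addr_2 = mac[2];
+	eth->local_addr_3 = mac[3];
+	eth->local_addr_4 = mac[4];
+	eth->local_addr_5 = mac[5];
+}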
+
+/*
+ * Flags used in the FCoE context section (1 byte)
+ */
+struct xstorm_fcoe_context_flags {
+	u8 flags;
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ (0x1<<3)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_BLOCK_SQ_SHIFT 3
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN (0x1<<7)
+#define XSTORM_FCOE_CONTEXT_FLAGS_B_VNTAG_VLAN_SHIFT 7
+};
+
+struct xstorm_fcoe_tce {
+	struct fcoe_tce_tx_only txwr;
+	struct fcoe_tce_tx_wr_rx_rd txwr_rxrd;
+};
+
+/*
+ * FCP_DATA parameters required for transmission
+ */
+struct xstorm_fcoe_fcp_data {
+	u32 io_rem;
+#if defined(__BIG_ENDIAN)
+	u16 cached_sge_off;
+	u8 cached_num_sges;
+	u8 cached_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 cached_sge_idx;
+	u8 cached_num_sges;
+	u16 cached_sge_off;
+#endif
+	u32 buf_addr_hi_0;
+	u32 buf_addr_lo_0;
+#if defined(__BIG_ENDIAN)
+	u16 num_of_pending_tasks;
+	u16 buf_len_0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_0;
+	u16 num_of_pending_tasks;
+#endif
+	u32 buf_addr_hi_1;
+	u32 buf_addr_lo_1;
+#if defined(__BIG_ENDIAN)
+	u16 task_pbe_idx_off;
+	u16 buf_len_1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_1;
+	u16 task_pbe_idx_off;
+#endif
+	u32 buf_addr_hi_2;
+	u32 buf_addr_lo_2;
+#if defined(__BIG_ENDIAN)
+	u16 ox_id;
+	u16 buf_len_2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 buf_len_2;
+	u16 ox_id;
+#endif
+};
+
+/*
+ * VLAN configuration
+ */
+struct xstorm_fcoe_vlan_conf {
+	u8 vlan_conf;
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY (0x7<<0)
+#define XSTORM_FCOE_VLAN_CONF_PRIORITY_SHIFT 0
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG (0x1<<3)
+#define XSTORM_FCOE_VLAN_CONF_INNER_VLAN_FLAG_SHIFT 3
+#define XSTORM_FCOE_VLAN_CONF_RESERVED (0xF<<4)
+#define XSTORM_FCOE_VLAN_CONF_RESERVED_SHIFT 4
+};
+
+/*
+ * FCoE 16-bit VLAN structure
+ */
+struct fcoe_vlan_fields {
+	u16 fields;
+#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
+#define FCOE_VLAN_FIELDS_VID_SHIFT 0
+#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
+#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
+#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
+#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
+};
+
+/*
+ * FCoE 16-bit VLAN union
+ */
+union fcoe_vlan_field_union {
+	struct fcoe_vlan_fields fields;
+	u16 val;
+};
+
+/*
+ * FCoE 16-bit VLAN/VIF union
+ */
+union fcoe_vlan_vif_field_union {
+	union fcoe_vlan_field_union vlan;
+	u16 vif;
+};
+
+/*
+ * FCoE context section
+ */
+struct xstorm_fcoe_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 cs_ctl;
+	u8 s_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 s_id[3];
+	u8 cs_ctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 rctl;
+	u8 d_id[3];
+#elif defined(__LITTLE_ENDIAN)
+	u8 d_id[3];
+	u8 rctl;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 sq_xfrq_lcq_confq_size;
+	u16 tx_max_fc_pay_len;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tx_max_fc_pay_len;
+	u16 sq_xfrq_lcq_confq_size;
+#endif
+	u32 lcq_prod;
+#if defined(__BIG_ENDIAN)
+	u8 port_id;
+	u8 func_id;
+	u8 seq_id;
+	struct xstorm_fcoe_context_flags tx_flags;
+#elif defined(__LITTLE_ENDIAN)
+	struct xstorm_fcoe_context_flags tx_flags;
+	u8 seq_id;
+	u8 func_id;
+	u8 port_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 mtu;
+	u8 func_mode;
+	u8 vnic_id;
+#elif defined(__LITTLE_ENDIAN)
+	u8 vnic_id;
+	u8 func_mode;
+	u16 mtu;
+#endif
+	struct regpair confq_curr_page_addr;
+	struct fcoe_cached_wqe cached_wqe[8];
+	struct regpair lcq_base_addr;
+	struct xstorm_fcoe_tce tce;
+	struct xstorm_fcoe_fcp_data fcp_data;
+#if defined(__BIG_ENDIAN)
+	u8 tx_max_conc_seqs_c3;
+	u8 vlan_flag;
+	u8 dcb_val;
+	u8 data_pb_cmd_size;
+#elif defined(__LITTLE_ENDIAN)
+	u8 data_pb_cmd_size;
+	u8 dcb_val;
+	u8 vlan_flag;
+	u8 tx_max_conc_seqs_c3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 fcoe_tx_stat_params_ram_addr;
+	u16 fcoe_tx_fc_seq_ram_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 fcoe_tx_fc_seq_ram_addr;
+	u16 fcoe_tx_stat_params_ram_addr;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 fcp_cmd_line_credit;
+	u8 eth_hdr_size;
+	u16 pbf_addr;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_addr;
+	u8 eth_hdr_size;
+	u8 fcp_cmd_line_credit;
+#endif
+#if defined(__BIG_ENDIAN)
+	union fcoe_vlan_vif_field_union multi_func_val;
+	u8 page_log_size;
+	struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+#elif defined(__LITTLE_ENDIAN)
+	struct xstorm_fcoe_vlan_conf orig_vlan_conf;
+	u8 page_log_size;
+	union fcoe_vlan_vif_field_union multi_func_val;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 fcp_cmd_frame_size;
+	u16 pbf_addr_ff;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pbf_addr_ff;
+	u16 fcp_cmd_frame_size;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 vlan_num;
+	u8 cos;
+	u8 cache_xfrq_cons;
+	u8 cache_sq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u8 cache_sq_cons;
+	u8 cache_xfrq_cons;
+	u8 cos;
+	u8 vlan_num;
+#endif
+	u32 verify_tx_seq;
+};
+
+/*
+ * Xstorm FCoE Storm Context
+ */
+struct xstorm_fcoe_st_context {
+	struct xstorm_fcoe_eth_context_section eth;
+	struct xstorm_fcoe_context_section fcoe;
+};
+
+/*
+ * FCoE connection context
+ */
+struct fcoe_context {
+	struct ustorm_fcoe_st_context ustorm_st_context;
+	struct tstorm_fcoe_st_context tstorm_st_context;
+	struct xstorm_fcoe_ag_context xstorm_ag_context;
+	struct tstorm_fcoe_ag_context tstorm_ag_context;
+	struct ustorm_fcoe_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct xstorm_fcoe_st_context xstorm_st_context;
+};
+
+/*
+ * FCoE init params passed by driver to FW in FCoE init ramrod
+ * $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_init_ramrod_params {
+	struct fcoe_kwqe_init1 init_kwqe1;
+	struct fcoe_kwqe_init2 init_kwqe2;
+	struct fcoe_kwqe_init3 init_kwqe3;
+	struct regpair eq_pbl_base;
+	__le32 eq_pbl_size;
+	__le32 reserved2;
+	__le16 eq_prod;
+	__le16 sb_num;
+	u8 sb_id;
+	u8 reserved0;
+	__le16 reserved1;
+};
+
+/*
+ * FCoE statistics params buffer passed by driver to FW in FCoE statistics
+ * ramrod $$KEEP_ENDIANNESS$$
+ */
+struct fcoe_stat_ramrod_params {
+	struct fcoe_kwqe_stat stat_kwqe;
+};
+
+/*
+ * CQ DB: CQ producer and pending completion counter
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
+#if defined(__BIG_ENDIAN)
+	u16 cntr;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cntr;
+#endif
+};
+
+/*
+ * CQ DB pending completion ITT array
+ */
+struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
+};
+
+/*
+ * Cstorm CQ sequence-to-notify array, updated by the driver
+ */
+struct iscsi_cq_db_sqn_2_notify_arr {
+	u16 sqn[8];
+};
+
+/*
+ * Cstorm iSCSI Storm Context
+ */
+struct cstorm_iscsi_st_context {
+	struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
+	struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
+	struct regpair hq_pbl_base;
+	struct regpair hq_curr_pbe;
+	struct regpair task_pbl_base;
+	struct regpair cq_db_base;
+#if defined(__BIG_ENDIAN)
+	u16 hq_bd_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hq_bd_itt;
+#endif
+	u32 hq_bd_data_segment_len;
+	u32 hq_bd_buffer_offset;
+#if defined(__BIG_ENDIAN)
+	u8 rsrv;
+	u8 cq_proc_en_bit_map;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 hq_bd_opcode;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hq_bd_opcode;
+	u8 cq_pend_comp_itt_valid_bit_map;
+	u8 cq_proc_en_bit_map;
+	u8 rsrv;
+#endif
+	u32 hq_tcp_seq;
+#if defined(__BIG_ENDIAN)
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+	u16 hq_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_cons;
+	u16 flags;
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
+#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
+#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
+#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
+#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
+#endif
+	struct regpair rsrv1;
+};
+
+
+/*
+ * PDU header of an iSCSI SCSI command (SCSI read/write SQ WQE)
+ */
+struct iscsi_cmd_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES (0x7<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_ATTRIBUTES_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x3<<3)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 3
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG (0x1<<5)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_WRITE_FLAG_SHIFT 5
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG (0x1<<6)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_READ_FLAG_SHIFT 6
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 expected_data_transfer_length;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 scsi_command_block[4];
+};
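+
+/*
+ * Illustrative sketch (hypothetical helper, not part of the interface):
+ * data_fields packs the 24-bit data segment length together with the
+ * 8-bit total AHS length of the command BHS via the mask/shift pairs
+ * above:
+ */
+static inline u32 iscsi_cmd_pdu_data_fields(u32 dseg_len, u8 ahs_len)
+{
+	u32 v = (dseg_len <<
+		 ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT) &
+		ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH;
+
+	v |= ((u32)ahs_len <<
+	      ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT) &
+	     ISCSI_CMD_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH;
+	return v;
+}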
+
+
+/*
+ * Buffer per connection, used in Tstorm
+ */
+struct iscsi_conn_buf {
+	struct regpair reserved[8];
+};
+
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_rq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_r2tq_db {
+	struct regpair pbl_base;
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_cq_db {
+#if defined(__BIG_ENDIAN)
+	u16 cq_sn;
+	u16 prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 prod;
+	u16 cq_sn;
+#endif
+	struct regpair curr_pbe;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct rings_db {
+	struct ustorm_iscsi_rq_db rq;
+	struct ustorm_iscsi_r2tq_db r2tq;
+	struct ustorm_iscsi_cq_db cq[8];
+#if defined(__BIG_ENDIAN)
+	u16 rq_prod;
+	u16 r2tq_prod;
+#elif defined(__LITTLE_ENDIAN)
+	u16 r2tq_prod;
+	u16 rq_prod;
+#endif
+	struct regpair cq_pbl_base;
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct ustorm_iscsi_placement_db {
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+	u32 local_sge_0_address_hi;
+	u32 local_sge_0_address_lo;
+#if defined(__BIG_ENDIAN)
+	u16 curr_sge_offset;
+	u16 local_sge_0_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_0_size;
+	u16 curr_sge_offset;
+#endif
+	u32 local_sge_1_address_hi;
+	u32 local_sge_1_address_lo;
+#if defined(__BIG_ENDIAN)
+	u8 exp_padding_2b;
+	u8 nal_len_3b;
+	u16 local_sge_1_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_sge_1_size;
+	u8 nal_len_3b;
+	u8 exp_padding_2b;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 local_sge_index_2b;
+	u16 reserved7;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved7;
+	u8 local_sge_index_2b;
+	u8 sgl_size;
+#endif
+	u32 rem_pdu;
+	u32 place_db_bitfield_1;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
+	u32 place_db_bitfield_2;
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
+	u32 nal;
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
+#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0xFF<<24)
+#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 24
+};
+
+/*
+ * Ustorm iSCSI Storm Context
+ */
+struct ustorm_iscsi_st_context {
+	u32 exp_stat_sn;
+	u32 exp_data_sn;
+	struct rings_db ring;
+	struct regpair task_pbl_base;
+	struct regpair tce_phy_addr;
+	struct ustorm_iscsi_placement_db place_db;
+	u32 reserved8;
+	u32 rem_rcv_len;
+#if defined(__BIG_ENDIAN)
+	u16 hdr_itt;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u16 hdr_itt;
+#endif
+	u32 nal_bytes;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_second_byte_union;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+	u8 task_pdu_cache_index;
+	u8 task_pbe_cache_index;
+#elif defined(__LITTLE_ENDIAN)
+	u8 task_pbe_cache_index;
+	u8 task_pdu_cache_index;
+	u8 bitfield_0;
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
+#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
+#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC (0x1<<2)
+#define USTORM_ISCSI_ST_CONTEXT_BRESETCRC_SHIFT 2
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x1F<<3)
+#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 3
+	u8 hdr_second_byte_union;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved3;
+	u8 reserved2;
+	u8 acDecrement;
+#elif defined(__LITTLE_ENDIAN)
+	u8 acDecrement;
+	u8 reserved2;
+	u16 reserved3;
+#endif
+	u32 task_stat;
+#if defined(__BIG_ENDIAN)
+	u8 hdr_opcode;
+	u8 num_cqs;
+	u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+	u16 reserved5;
+	u8 num_cqs;
+	u8 hdr_opcode;
+#endif
+	u32 negotiated_rx;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
+	u32 negotiated_rx_and_flags;
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
+#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
+#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
+#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
+#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
+#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
+#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
+#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
+#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
+};
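+
+/*
+ * Extraction sketch (editorial; the helper name is hypothetical):
+ * negotiated_rx packs the negotiated maximum receive PDU length
+ * (24 bits) with the maximum number of outstanding R2Ts (8 bits);
+ * fields are recovered with the usual (val & MASK) >> SHIFT idiom:
+ */
+static inline u32
+ustorm_iscsi_max_recv_pdu_len(const struct ustorm_iscsi_st_context *ctx)
+{
+	return (ctx->negotiated_rx &
+		USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH) >>
+	       USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT;
+}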
+
+/*
+ * TCP context region, shared in TOE, RDMA and iSCSI
+ */
+struct tstorm_tcp_st_context_section {
+	u32 flags1;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0 (0x1<<26)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED0_SHIFT 26
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN (0x1<<31)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_LAST_ISLE_HAS_FIN_SHIFT 31
+	u32 flags2;
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION (0xFFFFFF<<0)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_SHIFT 0
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
+#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK (0x1<<30)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_RST_ATTACK_SHIFT 30
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK (0x1<<31)
+#define __TSTORM_TCP_ST_CONTEXT_SECTION_IN_WINDOW_SYN_ATTACK_SHIFT 31
+#if defined(__BIG_ENDIAN)
+	u16 mss;
+	u8 tcp_sm_state;
+	u8 rto_exp;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rto_exp;
+	u8 tcp_sm_state;
+	u16 mss;
+#endif
+	u32 rcv_nxt;
+	u32 timestamp_recent;
+	u32 timestamp_recent_time;
+	u32 cwnd;
+	u32 ss_thresh;
+	u32 cwnd_accum;
+	u32 prev_seg_seq;
+	u32 expected_rel_seq;
+	u32 recover;
+#if defined(__BIG_ENDIAN)
+	u8 retransmit_count;
+	u8 ka_max_probe_count;
+	u8 persist_probe_count;
+	u8 ka_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_probe_count;
+	u8 persist_probe_count;
+	u8 ka_max_probe_count;
+	u8 retransmit_count;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 statistics_counter_id;
+	u8 ooo_support_mode;
+	u8 snd_wnd_scale;
+	u8 dup_ack_count;
+#elif defined(__LITTLE_ENDIAN)
+	u8 dup_ack_count;
+	u8 snd_wnd_scale;
+	u8 ooo_support_mode;
+	u8 statistics_counter_id;
+#endif
+	u32 retransmit_start_time;
+	u32 ka_timeout;
+	u32 ka_interval;
+	u32 isle_start_seq;
+	u32 isle_end_seq;
+#if defined(__BIG_ENDIAN)
+	u16 second_isle_address;
+	u16 recent_seg_wnd;
+#elif defined(__LITTLE_ENDIAN)
+	u16 recent_seg_wnd;
+	u16 second_isle_address;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 max_isles_ever_happened;
+	u8 isles_number;
+	u16 last_isle_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 last_isle_address;
+	u8 isles_number;
+	u8 max_isles_ever_happened;
+#endif
+	u32 max_rt_time;
+#if defined(__BIG_ENDIAN)
+	u16 lsb_mac_address;
+	u16 vlan_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 vlan_id;
+	u16 lsb_mac_address;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 msb_mac_address;
+	u16 mid_mac_address;
+#elif defined(__LITTLE_ENDIAN)
+	u16 mid_mac_address;
+	u16 msb_mac_address;
+#endif
+	u32 rightmost_received_seq;
+};
+
+/*
+ * Termination variables
+ */
+struct iscsi_term_vars {
+	u8 BitMap;
+#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
+#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define ISCSI_TERM_VARS_RSRV (0x1<<7)
+#define ISCSI_TERM_VARS_RSRV_SHIFT 7
+};
+
+/*
+ * iSCSI context region, used only in iSCSI
+ */
+struct tstorm_iscsi_st_context_section {
+	u32 nalPayload;
+	u32 b2nh;
+#if defined(__BIG_ENDIAN)
+	u16 rq_cons;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+	u8 hdr_bytes_2_fetch;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hdr_bytes_2_fetch;
+	u8 flags;
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN (0x3<<5)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_NALLEN_SHIFT 5
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0 (0x1<<7)
+#define TSTORM_ISCSI_ST_CONTEXT_SECTION_RSRV0_SHIFT 7
+	u16 rq_cons;
+#endif
+	struct regpair rq_db_phy_addr;
+#if defined(__BIG_ENDIAN)
+	struct iscsi_term_vars term_vars;
+	u8 rsrv1;
+	u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+	u16 iscsi_conn_id;
+	u8 rsrv1;
+	struct iscsi_term_vars term_vars;
+#endif
+	u32 process_nxt;
+};
+
+/*
+ * The iSCSI non-aggregative context of Tstorm
+ */
+struct tstorm_iscsi_st_context {
+	struct tstorm_tcp_st_context_section tcp;
+	struct tstorm_iscsi_st_context_section iscsi;
+};
+
+/*
+ * Ethernet context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_eth_context_section {
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u8 local_addr_0;
+	u8 local_addr_1;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_1;
+	u8 local_addr_0;
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved_vlan_type;
+	u16 params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
+#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
+#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
+#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
+#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
+	u16 reserved_vlan_type;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 local_addr_2;
+	u8 local_addr_3;
+	u8 local_addr_4;
+	u8 local_addr_5;
+#elif defined(__LITTLE_ENDIAN)
+	u8 local_addr_5;
+	u8 local_addr_4;
+	u8 local_addr_3;
+	u8 local_addr_2;
+#endif
+};
+
+/*
+ * IPv4 context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_ip_v4_context_section {
+#if defined(__BIG_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_id;
+	u16 __pbf_hdr_cmd_rsvd_flags_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_flags_offset;
+	u16 __pbf_hdr_cmd_rsvd_id;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+	u8 tos;
+	u16 __pbf_hdr_cmd_rsvd_length;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_length;
+	u8 tos;
+	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
+#endif
+	u32 ip_local_addr;
+#if defined(__BIG_ENDIAN)
+	u8 ttl;
+	u8 __pbf_hdr_cmd_rsvd_protocol;
+	u16 __pbf_hdr_cmd_rsvd_csum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __pbf_hdr_cmd_rsvd_csum;
+	u8 __pbf_hdr_cmd_rsvd_protocol;
+	u8 ttl;
+#endif
+	u32 __pbf_hdr_cmd_rsvd_1;
+	u32 ip_remote_addr;
+};
+
+/*
+ * Padded IPv4 context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_padded_ip_v4_context_section {
+	struct xstorm_ip_v4_context_section ip_v4;
+	u32 reserved1[4];
+};
+
+/*
+ * IPv6 context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_ip_v6_context_section {
+#if defined(__BIG_ENDIAN)
+	u16 pbf_hdr_cmd_rsvd_payload_len;
+	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+	u8 hop_limit;
+#elif defined(__LITTLE_ENDIAN)
+	u8 hop_limit;
+	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
+	u16 pbf_hdr_cmd_rsvd_payload_len;
+#endif
+	u32 priority_flow_label;
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0)
+#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20)
+#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28)
+#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28
+	u32 ip_local_addr_lo_hi;
+	u32 ip_local_addr_lo_lo;
+	u32 ip_local_addr_hi_hi;
+	u32 ip_local_addr_hi_lo;
+	u32 ip_remote_addr_lo_hi;
+	u32 ip_remote_addr_lo_lo;
+	u32 ip_remote_addr_hi_hi;
+	u32 ip_remote_addr_hi_lo;
+};
+
+union xstorm_ip_context_section_types {
+	struct xstorm_padded_ip_v4_context_section padded_ip_v4;
+	struct xstorm_ip_v6_context_section ip_v6;
+};
+
+/*
+ * TCP context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_tcp_context_section {
+	u32 snd_max;
+#if defined(__BIG_ENDIAN)
+	u16 remote_port;
+	u16 local_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_port;
+	u16 remote_port;
+#endif
+#if defined(__BIG_ENDIAN)
+	u8 original_nagle_1b;
+	u8 ts_enabled;
+	u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+	u16 tcp_params;
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
+#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
+#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV (0x1<<11)
+#define XSTORM_TCP_CONTEXT_SECTION_SMALL_WIN_ADV_SHIFT 11
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
+#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
+#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
+#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
+	u8 ts_enabled;
+	u8 original_nagle_1b;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 pseudo_csum;
+	u16 window_scaling_factor;
+#elif defined(__LITTLE_ENDIAN)
+	u16 window_scaling_factor;
+	u16 pseudo_csum;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 reserved2;
+	u8 statistics_counter_id;
+	u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+	u8 statistics_params;
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
+#define XSTORM_TCP_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED (0x3F<<2)
+#define XSTORM_TCP_CONTEXT_SECTION_RESERVED_SHIFT 2
+	u8 statistics_counter_id;
+	u16 reserved2;
+#endif
+	u32 ts_time_diff;
+	u32 __next_timer_expir;
+};
+
+/*
+ * Common context section, shared in TOE, RDMA and iSCSI
+ */
+struct xstorm_common_context_section {
+	struct xstorm_eth_context_section ethernet;
+	union xstorm_ip_context_section_types ip_union;
+	struct xstorm_tcp_context_section tcp;
+#if defined(__BIG_ENDIAN)
+	u8 __dcb_val;
+	u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+	u8 reserved;
+	u8 ip_version_1b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ip_version_1b;
+	u8 reserved;
+	u8 flags;
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED (0x1<<0)
+#define XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT 0
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT (0x7<<1)
+#define XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT 1
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE (0x1<<4)
+#define XSTORM_COMMON_CONTEXT_SECTION_VLAN_MODE_SHIFT 4
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY (0x7<<5)
+#define XSTORM_COMMON_CONTEXT_SECTION_ORIGINAL_PRIORITY_SHIFT 5
+	u8 __dcb_val;
+#endif
+};
+
+/*
+ * Flags used in the iSCSI context section
+ */
+struct xstorm_iscsi_context_flags {
+	u8 flags;
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7
+};
+
+struct iscsi_task_context_entry_x {
+	u32 data_out_buffer_offset;
+	u32 itt;
+	u32 data_sn;
+};
+
+struct iscsi_task_context_entry_xuc_x_write_only {
+	u32 tx_r2t_sn;
+};
+
+struct iscsi_task_context_entry_xuc_xu_write_both {
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 sge_index;
+	u16 sge_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sge_offset;
+	u8 sge_index;
+	u8 sgl_size;
+#endif
+};
+
+/*
+ * iSCSI context section
+ */
+struct xstorm_iscsi_context_section {
+	u32 first_burst_length;
+	u32 max_send_pdu_length;
+	struct regpair sq_pbl_base;
+	struct regpair sq_curr_pbe;
+	struct regpair hq_pbl_base;
+	struct regpair hq_curr_pbe_base;
+	struct regpair r2tq_pbl_base;
+	struct regpair r2tq_curr_pbe_base;
+	struct regpair task_pbl_base;
+#if defined(__BIG_ENDIAN)
+	u16 data_out_count;
+	struct xstorm_iscsi_context_flags flags;
+	u8 task_pbl_cache_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 task_pbl_cache_idx;
+	struct xstorm_iscsi_context_flags flags;
+	u16 data_out_count;
+#endif
+	u32 seq_more_2_send;
+	u32 pdu_more_2_send;
+	struct iscsi_task_context_entry_x temp_tce_x;
+	struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr;
+	struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr;
+	struct regpair lun;
+	u32 exp_data_transfer_len_ttt;
+	u32 pdu_data_2_rxmit;
+	u32 rxmit_bytes_2_dr;
+#if defined(__BIG_ENDIAN)
+	u16 rxmit_sge_offset;
+	u16 hq_rxmit_cons;
+#elif defined(__LITTLE_ENDIAN)
+	u16 hq_rxmit_cons;
+	u16 rxmit_sge_offset;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 r2tq_cons;
+	u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+	u8 rxmit_sge_idx;
+#elif defined(__LITTLE_ENDIAN)
+	u8 rxmit_sge_idx;
+	u8 rxmit_flags;
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
+#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
+	u16 r2tq_cons;
+#endif
+	u32 hq_rxmit_tcp_seq;
+};
+
+/*
+ * Xstorm iSCSI Storm Context
+ */
+struct xstorm_iscsi_st_context {
+	struct xstorm_common_context_section common;
+	struct xstorm_iscsi_context_section iscsi;
+};
+
+/*
+ * iSCSI connection context
+ */
+struct iscsi_context {
+	struct ustorm_iscsi_st_context ustorm_st_context;
+	struct tstorm_iscsi_st_context tstorm_st_context;
+	struct xstorm_iscsi_ag_context xstorm_ag_context;
+	struct tstorm_iscsi_ag_context tstorm_ag_context;
+	struct cstorm_iscsi_ag_context cstorm_ag_context;
+	struct ustorm_iscsi_ag_context ustorm_ag_context;
+	struct timers_block_context timers_context;
+	struct regpair upb_context;
+	struct xstorm_iscsi_st_context xstorm_st_context;
+	struct regpair xpb_context;
+	struct cstorm_iscsi_st_context cstorm_st_context;
+};
+
+
+/*
+ * PDU header of an iSCSI DATA-OUT
+ */
+struct iscsi_data_pdu_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG (0x1<<7)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_FINAL_FLAG_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_DATA_PDU_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 rsrv2;
+	u32 exp_stat_sn;
+	u32 rsrv3;
+	u32 data_sn;
+	u32 buffer_offset;
+	u32 rsrv4;
+};
+
+
+/*
+ * PDU header of an iSCSI login request
+ */
+struct iscsi_login_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+	u8 version_max;
+	u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+	u8 version_min;
+	u8 version_max;
+	u8 op_attr;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG (0x3<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_NSG_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG (0x3<<2)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CSG_SHIFT 2
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0 (0x3<<4)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_RSRV0_SHIFT 4
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TRANSIT_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+	u16 isid_hi;
+	u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+	u16 tsih;
+	u16 isid_hi;
+#endif
+	u32 itt;
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv1;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv2[4];
+};
+
+/*
+ * PDU header of an iSCSI logout request
+ */
+struct iscsi_logout_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE (0x7F<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_REASON_CODE_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_LOGOUT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	u32 rsrv2[2];
+	u32 itt;
+#if defined(__BIG_ENDIAN)
+	u16 cid;
+	u16 rsrv1;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv1;
+	u16 cid;
+#endif
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
+};
+
+/*
+ * PDU header of an iSCSI TMF request
+ */
+struct iscsi_tmf_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1 (0x1<<7)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_RSRV1_1_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TMF_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 referenced_task_tag;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 ref_cmd_sn;
+	u32 exp_data_sn;
+	u32 rsrv2[2];
+};
+
+/*
+ * PDU header of an iSCSI Text request
+ */
+struct iscsi_text_req_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1 (0x3F<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG (0x1<<6)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_CONTINUE_FLG_SHIFT 6
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_FINAL_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_TEXT_REQ_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
+};
+
+/*
+ * PDU header of an iSCSI Nop-Out
+ */
+struct iscsi_nop_out_hdr_little_endian {
+#if defined(__BIG_ENDIAN)
+	u8 opcode;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u8 op_attr;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1 (0x7F<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV1_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1 (0x1<<7)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_RSRV2_1_SHIFT 7
+	u8 opcode;
+#endif
+	u32 data_fields;
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH (0xFF<<24)
+#define ISCSI_NOP_OUT_HDR_LITTLE_ENDIAN_TOTAL_AHS_LENGTH_SHIFT 24
+	struct regpair lun;
+	u32 itt;
+	u32 ttt;
+	u32 cmd_sn;
+	u32 exp_stat_sn;
+	u32 rsrv3[4];
+};
+
+/*
+ * iSCSI PDU headers in little-endian form.
+ */
+union iscsi_pdu_headers_little_endian {
+	u32 fullHeaderSize[12];
+	struct iscsi_cmd_pdu_hdr_little_endian command_pdu_hdr;
+	struct iscsi_data_pdu_hdr_little_endian data_out_pdu_hdr;
+	struct iscsi_login_req_hdr_little_endian login_req_pdu_hdr;
+	struct iscsi_logout_req_hdr_little_endian logout_req_pdu_hdr;
+	struct iscsi_tmf_req_hdr_little_endian tmf_req_pdu_hdr;
+	struct iscsi_text_req_hdr_little_endian text_req_pdu_hdr;
+	struct iscsi_nop_out_hdr_little_endian nop_out_pdu_hdr;
+};
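+
+/*
+ * Note: fullHeaderSize spans 12 dwords - the 48-byte iSCSI basic header
+ * segment - so every request header above overlays the same storage.
+ * Packed fields are read with the MASK/SHIFT macro pairs defined above,
+ * e.g. (variable names here are illustrative only):
+ *
+ *	dsl = (hdr->data_fields &
+ *	       ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH) >>
+ *	      ISCSI_LOGIN_REQ_HDR_LITTLE_ENDIAN_DATA_SEGMENT_LENGTH_SHIFT;
+ */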
+
+struct iscsi_hq_bd {
+	union iscsi_pdu_headers_little_endian pdu_header;
+#if defined(__BIG_ENDIAN)
+	u16 reserved1;
+	u16 lcl_cmp_flg;
+#elif defined(__LITTLE_ENDIAN)
+	u16 lcl_cmp_flg;
+	u16 reserved1;
+#endif
+	u32 sgl_base_lo;
+	u32 sgl_base_hi;
+#if defined(__BIG_ENDIAN)
+	u8 sgl_size;
+	u8 sge_index;
+	u16 sge_offset;
+#elif defined(__LITTLE_ENDIAN)
+	u16 sge_offset;
+	u8 sge_index;
+	u8 sgl_size;
+#endif
+};
+
+
+/*
+ * CQE data for L2 OOO connection $$KEEP_ENDIANNESS$$
+ */
+struct iscsi_l2_ooo_data {
+	__le32 iscsi_cid;
+	u8 drop_isle;
+	u8 drop_size;
+	u8 ooo_opcode;
+	u8 ooo_isle;
+	u8 reserved[8];
+};
+
+
+struct iscsi_task_context_entry_xuc_c_write_only {
+	u32 total_data_acked;
+};
+
+struct iscsi_task_context_r2t_table_entry {
+	u32 ttt;
+	u32 desired_data_len;
+};
+
+struct iscsi_task_context_entry_xuc_u_write_only {
+	u32 exp_r2t_sn;
+	struct iscsi_task_context_r2t_table_entry r2t_table[4];
+#if defined(__BIG_ENDIAN)
+	u16 data_in_count;
+	u8 cq_id;
+	u8 valid_1b;
+#elif defined(__LITTLE_ENDIAN)
+	u8 valid_1b;
+	u8 cq_id;
+	u16 data_in_count;
+#endif
+};
+
+struct iscsi_task_context_entry_xuc {
+	struct iscsi_task_context_entry_xuc_c_write_only write_c;
+	u32 exp_data_transfer_len;
+	struct iscsi_task_context_entry_xuc_x_write_only write_x;
+	u32 lun_lo;
+	struct iscsi_task_context_entry_xuc_xu_write_both write_xu;
+	u32 lun_hi;
+	struct iscsi_task_context_entry_xuc_u_write_only write_u;
+};
+
+struct iscsi_task_context_entry_u {
+	u32 exp_r2t_buff_offset;
+	u32 rem_rcv_len;
+	u32 exp_data_sn;
+};
+
+struct iscsi_task_context_entry {
+	struct iscsi_task_context_entry_x tce_x;
+#if defined(__BIG_ENDIAN)
+	u16 data_out_count;
+	u16 rsrv0;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rsrv0;
+	u16 data_out_count;
+#endif
+	struct iscsi_task_context_entry_xuc tce_xuc;
+	struct iscsi_task_context_entry_u tce_u;
+	u32 rsrv1[7];
+};
+
+
+struct iscsi_task_context_entry_xuc_x_init_only {
+	struct regpair lun;
+	u32 exp_data_transfer_len;
+};
+
+
+/*
+ * IPv6 address structure
+ */
+struct ip_v6_addr {
+	u32 ip_addr_lo_lo;
+	u32 ip_addr_lo_hi;
+	u32 ip_addr_hi_lo;
+	u32 ip_addr_hi_hi;
+};
+
+
+
+/*
+ * l5cm - connection identification params
+ */
+struct l5cm_conn_addr_params {
+	u32 pmtu;
+#if defined(__BIG_ENDIAN)
+	u8 remote_addr_3;
+	u8 remote_addr_2;
+	u8 remote_addr_1;
+	u8 remote_addr_0;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_0;
+	u8 remote_addr_1;
+	u8 remote_addr_2;
+	u8 remote_addr_3;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+	u8 remote_addr_5;
+	u8 remote_addr_4;
+#elif defined(__LITTLE_ENDIAN)
+	u8 remote_addr_4;
+	u8 remote_addr_5;
+	u16 params;
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
+#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
+#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
+#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
+#endif
+	struct ip_v6_addr local_ip_addr;
+	struct ip_v6_addr remote_ip_addr;
+	u32 ipv6_flow_label_20b;
+	u32 reserved1;
+#if defined(__BIG_ENDIAN)
+	u16 remote_tcp_port;
+	u16 local_tcp_port;
+#elif defined(__LITTLE_ENDIAN)
+	u16 local_tcp_port;
+	u16 remote_tcp_port;
+#endif
+};
+
+/*
+ * l5cm-xstorm connection buffer
+ */
+struct l5cm_xstorm_conn_buffer {
+#if defined(__BIG_ENDIAN)
+	u16 rsrv1;
+	u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#elif defined(__LITTLE_ENDIAN)
+	u16 params;
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
+#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
+#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
+	u16 rsrv1;
+#endif
+#if defined(__BIG_ENDIAN)
+	u16 mss;
+	u16 pseudo_header_checksum;
+#elif defined(__LITTLE_ENDIAN)
+	u16 pseudo_header_checksum;
+	u16 mss;
+#endif
+	u32 rcv_buf;
+	u32 rsrv2;
+	struct regpair context_addr;
+};
+
+/*
+ * l5cm-tstorm connection buffer
+ */
+struct l5cm_tstorm_conn_buffer {
+	u32 rsrv1[2];
+#if defined(__BIG_ENDIAN)
+	u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+	u8 ka_max_probe_count;
+	u8 ka_enable;
+#elif defined(__LITTLE_ENDIAN)
+	u8 ka_enable;
+	u8 ka_max_probe_count;
+	u16 params;
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
+#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
+#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
+#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
+#endif
+	u32 ka_timeout;
+	u32 ka_interval;
+	u32 max_rt_time;
+};
+
+/*
+ * l5cm connection buffer for active side
+ */
+struct l5cm_active_conn_buffer {
+	struct l5cm_conn_addr_params conn_addr_buf;
+	struct l5cm_xstorm_conn_buffer xstorm_conn_buffer;
+	struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
+};
+
+
+
+/*
+ * The l5cm opaque buffer passed in the "add new connection" ramrod (passive side)
+ */
+struct l5cm_hash_input_string {
+	u32 __opaque1;
+#if defined(__BIG_ENDIAN)
+	u16 __opaque3;
+	u16 __opaque2;
+#elif defined(__LITTLE_ENDIAN)
+	u16 __opaque2;
+	u16 __opaque3;
+#endif
+	struct ip_v6_addr __opaque4;
+	struct ip_v6_addr __opaque5;
+	u32 __opaque6;
+	u32 __opaque7[5];
+};
+
+
+/*
+ * syn cookie component
+ */
+struct l5cm_syn_cookie_comp {
+	u32 __opaque;
+};
+
+/*
+ * data related to listeners of a TCP port
+ */
+struct l5cm_port_listener_data {
+	u8 params;
+#define L5CM_PORT_LISTENER_DATA_ENABLE (0x1<<0)
+#define L5CM_PORT_LISTENER_DATA_ENABLE_SHIFT 0
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX (0xF<<1)
+#define L5CM_PORT_LISTENER_DATA_IP_INDEX_SHIFT 1
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER (0x1<<5)
+#define L5CM_PORT_LISTENER_DATA_NET_FILTER_SHIFT 5
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE (0x1<<6)
+#define L5CM_PORT_LISTENER_DATA_DEFFERED_MODE_SHIFT 6
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE (0x1<<7)
+#define L5CM_PORT_LISTENER_DATA_MPA_MODE_SHIFT 7
+};
+
+/*
+ * Opaque structure passed from U to X when final ack arrives
+ */
+struct l5cm_opaque_buf {
+	u32 __opaque1;
+	u32 __opaque2;
+	u32 __opaque3;
+	u32 __opaque4;
+	struct l5cm_syn_cookie_comp __opaque5;
+#if defined(__BIG_ENDIAN)
+	u16 rsrv2;
+	u8 rsrv;
+	struct l5cm_port_listener_data __opaque6;
+#elif defined(__LITTLE_ENDIAN)
+	struct l5cm_port_listener_data __opaque6;
+	u8 rsrv;
+	u16 rsrv2;
+#endif
+};
+
+
+/*
+ * l5cm slow path element
+ */
+struct l5cm_packet_size {
+	u32 size;
+	u32 rsrv;
+};
+
+
+/*
+ * The final-ack union structure in PCS entry after final ack arrived
+ */
+struct l5cm_pcse_ack {
+	struct l5cm_xstorm_conn_buffer tx_socket_params;
+	struct l5cm_opaque_buf opaque_buf;
+	struct l5cm_tstorm_conn_buffer rx_socket_params;
+};
+
+
+/*
+ * The syn union structure in PCS entry after syn arrived
+ */
+struct l5cm_pcse_syn {
+	struct l5cm_opaque_buf opaque_buf;
+	u32 rsrv[12];
+};
+
+
+/*
+ * pcs entry data for passive connections
+ */
+struct l5cm_pcs_attributes {
+#if defined(__BIG_ENDIAN)
+	u16 pcs_id;
+	u8 status;
+	u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+#elif defined(__LITTLE_ENDIAN)
+	u8 flags;
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER (0x1<<0)
+#define L5CM_PCS_ATTRIBUTES_NET_FILTER_SHIFT 0
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH (0x1<<1)
+#define L5CM_PCS_ATTRIBUTES_CALCULATE_HASH_SHIFT 1
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT (0x1<<2)
+#define L5CM_PCS_ATTRIBUTES_COMPARE_HASH_RESULT_SHIFT 2
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT (0x1<<3)
+#define L5CM_PCS_ATTRIBUTES_QUERY_ULP_ACCEPT_SHIFT 3
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC (0x1<<4)
+#define L5CM_PCS_ATTRIBUTES_FIND_DEST_MAC_SHIFT 4
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD (0x1<<5)
+#define L5CM_PCS_ATTRIBUTES_L4_OFFLOAD_SHIFT 5
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET (0x1<<6)
+#define L5CM_PCS_ATTRIBUTES_FORWARD_PACKET_SHIFT 6
+#define L5CM_PCS_ATTRIBUTES_RSRV (0x1<<7)
+#define L5CM_PCS_ATTRIBUTES_RSRV_SHIFT 7
+	u8 status;
+	u16 pcs_id;
+#endif
+};
+
+
+union l5cm_seg_params {
+	struct l5cm_pcse_syn syn_seg_params;
+	struct l5cm_pcse_ack ack_seg_params;
+};
+
+/*
+ * pcs entry header for passive connections
+ */
+struct l5cm_pcs_hdr {
+	struct l5cm_hash_input_string hash_input_string;
+	struct l5cm_conn_addr_params conn_addr_buf;
+	u32 cid;
+	u32 hash_result;
+	union l5cm_seg_params seg_params;
+	struct l5cm_pcs_attributes att;
+#if defined(__BIG_ENDIAN)
+	u16 rsrv;
+	u16 rx_seg_size;
+#elif defined(__LITTLE_ENDIAN)
+	u16 rx_seg_size;
+	u16 rsrv;
+#endif
+};
+
+/*
+ * pcs entry for passive connections
+ */
+struct l5cm_pcs_entry {
+	struct l5cm_pcs_hdr hdr;
+	u8 rx_segment[1516];
+};
+
+
+
+
+/*
+ * l5cm connection parameters
+ */
+union l5cm_reduce_param_union {
+	u32 opaque1;
+	u32 opaque2;
+};
+
+/*
+ * l5cm connection parameters
+ */
+struct l5cm_reduce_conn {
+	union l5cm_reduce_param_union opaque1;
+	u32 opaque2;
+};
+
+/*
+ * l5cm slow path element
+ */
+union l5cm_specific_data {
+	u8 protocol_data[8];
+	struct regpair phy_address;
+	struct l5cm_packet_size packet_size;
+	struct l5cm_reduce_conn reduced_conn;
+};
+
+/*
+ * l5 slow path element
+ */
+struct l5cm_spe {
+	struct spe_hdr hdr;
+	union l5cm_specific_data data;
+};
+
+
+
+
+/*
+ * Termination variables
+ */
+struct l5cm_term_vars {
+	u8 BitMap;
+#define L5CM_TERM_VARS_TCP_STATE (0xF<<0)
+#define L5CM_TERM_VARS_TCP_STATE_SHIFT 0
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
+#define L5CM_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
+#define L5CM_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
+#define L5CM_TERM_VARS_TERM_ON_CHIP (0x1<<6)
+#define L5CM_TERM_VARS_TERM_ON_CHIP_SHIFT 6
+#define L5CM_TERM_VARS_RSRV (0x1<<7)
+#define L5CM_TERM_VARS_RSRV_SHIFT 7
+};
+
+
+
+
+/*
+ * Tstorm Tcp flags
+ */
+struct tstorm_l5cm_tcp_flags {
+	u16 flags;
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
+#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
+#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
+#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
+#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
+#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
+};
+
+
+/*
+ * Xstorm Tcp flags
+ */
+struct xstorm_l5cm_tcp_flags {
+	u8 flags;
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0)
+#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1)
+#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2)
+#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2
+#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3)
+#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
+};
+
+
+
+/*
+ * Out-of-order events
+ */
+enum tcp_ooo_event {
+	TCP_EVENT_ADD_PEN = 0,
+	TCP_EVENT_ADD_NEW_ISLE = 1,
+	TCP_EVENT_ADD_ISLE_RIGHT = 2,
+	TCP_EVENT_ADD_ISLE_LEFT = 3,
+	TCP_EVENT_JOIN = 4,
+	TCP_EVENT_NOP = 5,
+	MAX_TCP_OOO_EVENT
+};
+
+
+/*
+ * OOO support modes
+ */
+enum tcp_tstorm_ooo {
+	TCP_TSTORM_OOO_DROP_AND_PROC_ACK = 0,
+	TCP_TSTORM_OOO_SEND_PURE_ACK = 1,
+	TCP_TSTORM_OOO_SUPPORTED = 2,
+	MAX_TCP_TSTORM_OOO
+};
+
+
+#endif /* __5710_HSI_CNIC_LE__ */
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
new file mode 100644
index 0000000..79443e0
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -0,0 +1,340 @@
+/* cnic_if.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2011 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#define CNIC_MODULE_VERSION	"2.5.7"
+#define CNIC_MODULE_RELDATE	"July 20, 2011"
+
+#define CNIC_ULP_RDMA		0
+#define CNIC_ULP_ISCSI		1
+#define CNIC_ULP_FCOE		2
+#define CNIC_ULP_L4		3
+#define MAX_CNIC_ULP_TYPE_EXT	3
+#define MAX_CNIC_ULP_TYPE	4
+
+struct kwqe {
+	u32 kwqe_op_flag;
+
+#define KWQE_QID_SHIFT		8
+#define KWQE_OPCODE_MASK	0x00ff0000
+#define KWQE_OPCODE_SHIFT	16
+#define KWQE_OPCODE(x)		((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+#define KWQE_LAYER_MASK			0x70000000
+#define KWQE_LAYER_SHIFT		28
+#define KWQE_FLAGS_LAYER_MASK_L2	(2<<28)
+#define KWQE_FLAGS_LAYER_MASK_L3	(3<<28)
+#define KWQE_FLAGS_LAYER_MASK_L4	(4<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
+#define KWQE_FLAGS_LAYER_MASK_L5_FCOE	(7<<28)
+
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+	u32 kwqe_info4;
+	u32 kwqe_info5;
+	u32 kwqe_info6;
+};
+
+struct kwqe_16 {
+	u32 kwqe_info0;
+	u32 kwqe_info1;
+	u32 kwqe_info2;
+	u32 kwqe_info3;
+};
+
+struct kcqe {
+	u32 kcqe_info0;
+	u32 kcqe_info1;
+	u32 kcqe_info2;
+	u32 kcqe_info3;
+	u32 kcqe_info4;
+	u32 kcqe_info5;
+	u32 kcqe_info6;
+	u32 kcqe_op_flag;
+		#define KCQE_RAMROD_COMPLETION		(0x1<<27) /* Everest */
+		#define KCQE_FLAGS_LAYER_MASK		(0x7<<28)
+		#define KCQE_FLAGS_LAYER_MASK_MISC	(0<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L2	(2<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L3	(3<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L4	(4<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_RDMA	(5<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_ISCSI	(6<<28)
+		#define KCQE_FLAGS_LAYER_MASK_L5_FCOE	(7<<28)
+		#define KCQE_FLAGS_NEXT 		(1<<31)
+		#define KCQE_FLAGS_OPCODE_MASK		(0xff<<16)
+		#define KCQE_FLAGS_OPCODE_SHIFT		(16)
+		#define KCQE_OPCODE(op)			\
+		(((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
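+
+/*
+ * Illustrative decode of a completion's routing fields (variable names
+ * are local to this sketch, not part of the interface):
+ *
+ *	u32 layer  = kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+ *	u8 opcode  = KCQE_OPCODE(kcqe->kcqe_op_flag);
+ *
+ * 'layer' is then compared against KCQE_FLAGS_LAYER_MASK_L5_ISCSI etc.
+ * to pick the ULP that should consume the event.
+ */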
+
+#define MAX_CNIC_CTL_DATA	64
+#define MAX_DRV_CTL_DATA	64
+
+#define CNIC_CTL_STOP_CMD		1
+#define CNIC_CTL_START_CMD		2
+#define CNIC_CTL_COMPLETION_CMD		3
+#define CNIC_CTL_STOP_ISCSI_CMD		4
+
+#define DRV_CTL_IO_WR_CMD		0x101
+#define DRV_CTL_IO_RD_CMD		0x102
+#define DRV_CTL_CTX_WR_CMD		0x103
+#define DRV_CTL_CTXTBL_WR_CMD		0x104
+#define DRV_CTL_RET_L5_SPQ_CREDIT_CMD	0x105
+#define DRV_CTL_START_L2_CMD		0x106
+#define DRV_CTL_STOP_L2_CMD		0x107
+#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD	0x10c
+#define DRV_CTL_ISCSI_STOPPED_CMD	0x10d
+
+struct cnic_ctl_completion {
+	u32	cid;
+	u8	opcode;
+	u8	error;
+};
+
+struct cnic_ctl_info {
+	int	cmd;
+	union {
+		struct cnic_ctl_completion comp;
+		char bytes[MAX_CNIC_CTL_DATA];
+	} data;
+};
+
+struct drv_ctl_spq_credit {
+	u32	credit_count;
+};
+
+struct drv_ctl_io {
+	u32		cid_addr;
+	u32		offset;
+	u32		data;
+	dma_addr_t	dma_addr;
+};
+
+struct drv_ctl_l2_ring {
+	u32		client_id;
+	u32		cid;
+};
+
+struct drv_ctl_info {
+	int	cmd;
+	union {
+		struct drv_ctl_spq_credit credit;
+		struct drv_ctl_io io;
+		struct drv_ctl_l2_ring ring;
+		char bytes[MAX_DRV_CTL_DATA];
+	} data;
+};
+
+struct cnic_ops {
+	struct module	*cnic_owner;
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+	int		(*cnic_handler)(void *, void *);
+	int		(*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
+
+#define MAX_CNIC_VEC	8
+
+struct cnic_irq {
+	unsigned int	vector;
+	void		*status_blk;
+	u32		status_blk_num;
+	u32		status_blk_num2;
+	u32		irq_flags;
+#define CNIC_IRQ_FL_MSIX		0x00000001
+};
+
+struct cnic_eth_dev {
+	struct module	*drv_owner;
+	u32		drv_state;
+#define CNIC_DRV_STATE_REGD		0x00000001
+#define CNIC_DRV_STATE_USING_MSIX	0x00000002
+#define CNIC_DRV_STATE_NO_ISCSI_OOO	0x00000004
+#define CNIC_DRV_STATE_NO_ISCSI		0x00000008
+#define CNIC_DRV_STATE_NO_FCOE		0x00000010
+	u32		chip_id;
+	u32		max_kwqe_pending;
+	struct pci_dev	*pdev;
+	void __iomem	*io_base;
+	void __iomem	*io_base2;
+	const void	*iro_arr;
+
+	u32		ctx_tbl_offset;
+	u32		ctx_tbl_len;
+	int		ctx_blk_size;
+	u32		starting_cid;
+	u32		max_iscsi_conn;
+	u32		max_fcoe_conn;
+	u32		max_rdma_conn;
+	u32		fcoe_init_cid;
+	u32		fcoe_wwn_port_name_hi;
+	u32		fcoe_wwn_port_name_lo;
+	u32		fcoe_wwn_node_name_hi;
+	u32		fcoe_wwn_node_name_lo;
+
+	u16		iscsi_l2_client_id;
+	u16		iscsi_l2_cid;
+	u8		iscsi_mac[ETH_ALEN];
+
+	int		num_irq;
+	struct cnic_irq	irq_arr[MAX_CNIC_VEC];
+	int		(*drv_register_cnic)(struct net_device *,
+					     struct cnic_ops *, void *);
+	int		(*drv_unregister_cnic)(struct net_device *);
+	int		(*drv_submit_kwqes_32)(struct net_device *,
+					       struct kwqe *[], u32);
+	int		(*drv_submit_kwqes_16)(struct net_device *,
+					       struct kwqe_16 *[], u32);
+	int		(*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+	unsigned long	reserved1[2];
+};
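+
+/*
+ * An instance of this structure is exported by the underlying Ethernet
+ * driver (see bnx2_cnic_probe()/bnx2x_cnic_probe() at the bottom of
+ * this file) and is how the CNIC core reaches the shared hardware.
+ */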
+
+struct cnic_sockaddr {
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} local;
+	union {
+		struct sockaddr_in	v4;
+		struct sockaddr_in6	v6;
+	} remote;
+};
+
+struct cnic_sock {
+	struct cnic_dev *dev;
+	void	*context;
+	u32	src_ip[4];
+	u32	dst_ip[4];
+	u16	src_port;
+	u16	dst_port;
+	u16	vlan_id;
+	unsigned char old_ha[6];
+	unsigned char ha[6];
+	u32	mtu;
+	u32	cid;
+	u32	l5_cid;
+	u32	pg_cid;
+	int	ulp_type;
+
+	u32	ka_timeout;
+	u32	ka_interval;
+	u8	ka_max_probe_count;
+	u8	tos;
+	u8	ttl;
+	u8	snd_seq_scale;
+	u32	rcv_buf;
+	u32	snd_buf;
+	u32	seed;
+
+	unsigned long	tcp_flags;
+#define SK_TCP_NO_DELAY_ACK	0x1
+#define SK_TCP_KEEP_ALIVE	0x2
+#define SK_TCP_NAGLE		0x4
+#define SK_TCP_TIMESTAMP	0x8
+#define SK_TCP_SACK		0x10
+#define SK_TCP_SEG_SCALING	0x20
+	unsigned long	flags;
+#define SK_F_INUSE		0
+#define SK_F_OFFLD_COMPLETE	1
+#define SK_F_OFFLD_SCHED	2
+#define SK_F_PG_OFFLD_COMPLETE	3
+#define SK_F_CONNECT_START	4
+#define SK_F_IPV6		5
+#define SK_F_CLOSING		7
+
+	atomic_t ref_count;
+	u32 state;
+	struct kwqe kwqe1;
+	struct kwqe kwqe2;
+	struct kwqe kwqe3;
+};
+
+struct cnic_dev {
+	struct net_device	*netdev;
+	struct pci_dev		*pcidev;
+	void __iomem		*regview;
+	struct list_head	list;
+
+	int (*register_device)(struct cnic_dev *dev, int ulp_type,
+			       void *ulp_ctx);
+	int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+	int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+				u32 num_wqes);
+	int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+				u32 num_wqes);
+
+	int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+			 void *);
+	int (*cm_destroy)(struct cnic_sock *);
+	int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+	int (*cm_abort)(struct cnic_sock *);
+	int (*cm_close)(struct cnic_sock *);
+	struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+	int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+				 char *data, u16 data_size);
+	unsigned long	flags;
+#define CNIC_F_CNIC_UP		1
+#define CNIC_F_BNX2_CLASS	3
+#define CNIC_F_BNX2X_CLASS	4
+	atomic_t	ref_count;
+	u8		mac_addr[6];
+
+	int		max_iscsi_conn;
+	int		max_fcoe_conn;
+	int		max_rdma_conn;
+
+	void		*cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val)		writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val)	writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val)		writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off)		readl(dev->regview + off)
+#define CNIC_RD16(dev, off)		readw(dev->regview + off)
+
+struct cnic_ulp_ops {
+	/* Calls to these functions are protected by RCU.  When
+	 * unregistering, we wait for any calls to complete before
+	 * continuing.
+	 */
+
+	void (*cnic_init)(struct cnic_dev *dev);
+	void (*cnic_exit)(struct cnic_dev *dev);
+	void (*cnic_start)(void *ulp_ctx);
+	void (*cnic_stop)(void *ulp_ctx);
+	void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+				u32 num_cqes);
+	void (*indicate_netevent)(void *ulp_ctx, unsigned long event, u16 vid);
+	void (*cm_connect_complete)(struct cnic_sock *);
+	void (*cm_close_complete)(struct cnic_sock *);
+	void (*cm_abort_complete)(struct cnic_sock *);
+	void (*cm_remote_close)(struct cnic_sock *);
+	void (*cm_remote_abort)(struct cnic_sock *);
+	int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
+				  char *data, u16 data_size);
+	struct module *owner;
+	atomic_t ref_count;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
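+
+/*
+ * A ULP driver is expected to pair these calls, e.g. (sketch only):
+ *
+ *	rc = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
+ *	...
+ *	cnic_unregister_driver(CNIC_ULP_ISCSI);
+ */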
+
+extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
+extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
new file mode 100644
index 0000000..ea65f7e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -0,0 +1,2690 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Broadcom Corporation
+ * Copyright (c) 2006, 2007  Maciej W. Rozycki
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+ *
+ *
+ * This driver is designed for the Broadcom SiByte SOC built-in
+ * Ethernet controllers. Written by Mitch Lichtenberg at Broadcom Corp.
+ *
+ * Updated to the driver model and the PHY abstraction layer
+ * by Maciej W. Rozycki.
+ */
+
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/prefetch.h>
+
+#include <asm/cache.h>
+#include <asm/io.h>
+#include <asm/processor.h>	/* Processor type for cache alignment. */
+
+/* Operational parameters that usually are not changed. */
+
+#define CONFIG_SBMAC_COALESCE
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (2*HZ)
+
+
+MODULE_AUTHOR("Mitch Lichtenberg (Broadcom Corp.)");
+MODULE_DESCRIPTION("Broadcom SiByte SOC GB Ethernet driver");
+
+/* A few user-configurable values which may be modified when a driver
+   module is loaded. */
+
+/* 1 normal messages, 0 quiet .. 7 verbose. */
+static int debug = 1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug messages");
+
+#ifdef CONFIG_SBMAC_COALESCE
+static int int_pktcnt_tx = 255;
+module_param(int_pktcnt_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_tx, "TX packet count");
+
+static int int_timeout_tx = 255;
+module_param(int_timeout_tx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_tx, "TX timeout value");
+
+static int int_pktcnt_rx = 64;
+module_param(int_pktcnt_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_pktcnt_rx, "RX packet count");
+
+static int int_timeout_rx = 64;
+module_param(int_timeout_rx, int, S_IRUGO);
+MODULE_PARM_DESC(int_timeout_rx, "RX timeout value");
+#endif
+
+#include <asm/sibyte/board.h>
+#include <asm/sibyte/sb1250.h>
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#define R_MAC_DMA_OODPKTLOST_RX	R_MAC_DMA_OODPKTLOST
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#else
+#error invalid SiByte MAC configuration
+#endif
+#include <asm/sibyte/sb1250_scd.h>
+#include <asm/sibyte/sb1250_mac.h>
+#include <asm/sibyte/sb1250_dma.h>
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+#define UNIT_INT(n)		(K_BCM1480_INT_MAC_0 + ((n) * 2))
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+#define UNIT_INT(n)		(K_INT_MAC_0 + (n))
+#else
+#error invalid SiByte MAC configuration
+#endif
+
+#ifdef K_INT_PHY
+#define SBMAC_PHY_INT			K_INT_PHY
+#else
+#define SBMAC_PHY_INT			PHY_POLL
+#endif
+
+/**********************************************************************
+ *  Simple types
+ ********************************************************************* */
+
+enum sbmac_speed {
+	sbmac_speed_none = 0,
+	sbmac_speed_10 = SPEED_10,
+	sbmac_speed_100 = SPEED_100,
+	sbmac_speed_1000 = SPEED_1000,
+};
+
+enum sbmac_duplex {
+	sbmac_duplex_none = -1,
+	sbmac_duplex_half = DUPLEX_HALF,
+	sbmac_duplex_full = DUPLEX_FULL,
+};
+
+enum sbmac_fc {
+	sbmac_fc_none,
+	sbmac_fc_disabled,
+	sbmac_fc_frame,
+	sbmac_fc_collision,
+	sbmac_fc_carrier,
+};
+
+enum sbmac_state {
+	sbmac_state_uninit,
+	sbmac_state_off,
+	sbmac_state_on,
+	sbmac_state_broken,
+};
+
+
+/**********************************************************************
+ *  Macros
+ ********************************************************************* */
+
+
+#define SBDMA_NEXTBUF(d,f) ((((d)->f+1) == (d)->sbdma_dscrtable_end) ? \
+			  (d)->sbdma_dscrtable : (d)->f+1)
+
+
+#define NUMCACHEBLKS(x) (((x)+SMP_CACHE_BYTES-1)/SMP_CACHE_BYTES)
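+
+/*
+ * SBDMA_NEXTBUF advances a descriptor ring pointer, wrapping back to
+ * the table base at the end of the ring.  NUMCACHEBLKS rounds a byte
+ * count up to whole cache lines; e.g. with 32-byte lines,
+ * NUMCACHEBLKS(1518) = (1518 + 31) / 32 = 48.
+ */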
+
+#define SBMAC_MAX_TXDESCR	256
+#define SBMAC_MAX_RXDESCR	256
+
+#define ETHER_ADDR_LEN		6
+#define ENET_PACKET_SIZE	1518
+/*#define ENET_PACKET_SIZE	9216 */
+
+/**********************************************************************
+ *  DMA Descriptor structure
+ ********************************************************************* */
+
+struct sbdmadscr {
+	uint64_t  dscr_a;
+	uint64_t  dscr_b;
+};
+
+/**********************************************************************
+ *  DMA Controller structure
+ ********************************************************************* */
+
+struct sbmacdma {
+
+	/*
+	 * This stuff is used to identify the channel and the registers
+	 * associated with it.
+	 */
+	struct sbmac_softc	*sbdma_eth;	/* back pointer to associated
+						   MAC */
+	int			sbdma_channel;	/* channel number */
+	int			sbdma_txdir;	/* direction (1=transmit) */
+	int			sbdma_maxdescr;	/* total # of descriptors
+						   in ring */
+#ifdef CONFIG_SBMAC_COALESCE
+	int			sbdma_int_pktcnt;
+						/* # descriptors rx/tx
+						   before interrupt */
+	int			sbdma_int_timeout;
+						/* # usec rx/tx interrupt */
+#endif
+	void __iomem		*sbdma_config0;	/* DMA config register 0 */
+	void __iomem		*sbdma_config1;	/* DMA config register 1 */
+	void __iomem		*sbdma_dscrbase;
+						/* descriptor base address */
+	void __iomem		*sbdma_dscrcnt;	/* descriptor count register */
+	void __iomem		*sbdma_curdscr;	/* current descriptor
+						   address */
+	void __iomem		*sbdma_oodpktlost;
+						/* pkt drop (rx only) */
+
+	/*
+	 * This stuff is for maintenance of the ring
+	 */
+	void			*sbdma_dscrtable_unaligned;
+	struct sbdmadscr	*sbdma_dscrtable;
+						/* base of descriptor table */
+	struct sbdmadscr	*sbdma_dscrtable_end;
+						/* end of descriptor table */
+	struct sk_buff		**sbdma_ctxtable;
+						/* context table, one
+						   per descr */
+	dma_addr_t		sbdma_dscrtable_phys;
+						/* and also the phys addr */
+	struct sbdmadscr	*sbdma_addptr;	/* next dscr for sw to add */
+	struct sbdmadscr	*sbdma_remptr;	/* next dscr for sw
+						   to remove */
+};
+
+
+/**********************************************************************
+ *  Ethernet softc structure
+ ********************************************************************* */
+
+struct sbmac_softc {
+
+	/*
+	 * Linux-specific things
+	 */
+	struct net_device	*sbm_dev;	/* pointer to linux device */
+	struct napi_struct	napi;
+	struct phy_device	*phy_dev;	/* the associated PHY device */
+	struct mii_bus		*mii_bus;	/* the MII bus */
+	int			phy_irq[PHY_MAX_ADDR];
+	spinlock_t		sbm_lock;	/* spin lock */
+	int			sbm_devflags;	/* current device flags */
+
+	/*
+	 * Controller-specific things
+	 */
+	void __iomem		*sbm_base;	/* MAC's base address */
+	enum sbmac_state	sbm_state;	/* current state */
+
+	void __iomem		*sbm_macenable;	/* MAC Enable Register */
+	void __iomem		*sbm_maccfg;	/* MAC Config Register */
+	void __iomem		*sbm_fifocfg;	/* FIFO Config Register */
+	void __iomem		*sbm_framecfg;	/* Frame Config Register */
+	void __iomem		*sbm_rxfilter;	/* Receive Filter Register */
+	void __iomem		*sbm_isr;	/* Interrupt Status Register */
+	void __iomem		*sbm_imr;	/* Interrupt Mask Register */
+	void __iomem		*sbm_mdio;	/* MDIO Register */
+
+	enum sbmac_speed	sbm_speed;	/* current speed */
+	enum sbmac_duplex	sbm_duplex;	/* current duplex */
+	enum sbmac_fc		sbm_fc;		/* cur. flow control setting */
+	int			sbm_pause;	/* current pause setting */
+	int			sbm_link;	/* current link state */
+
+	unsigned char		sbm_hwaddr[ETHER_ADDR_LEN];
+
+	struct sbmacdma		sbm_txdma;	/* only channel 0 for now */
+	struct sbmacdma		sbm_rxdma;
+	int			rx_hw_checksum;
+	int			sbe_idx;
+};
+
+
+/**********************************************************************
+ *  Externs
+ ********************************************************************* */
+
+/**********************************************************************
+ *  Prototypes
+ ********************************************************************* */
+
+static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
+			  int txrx, int maxdescr);
+static void sbdma_channel_start(struct sbmacdma *d, int rxtx);
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+			       struct sk_buff *m);
+static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m);
+static void sbdma_emptyring(struct sbmacdma *d);
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d);
+static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			    int work_to_do, int poll);
+static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			     int poll);
+static int sbmac_initctx(struct sbmac_softc *s);
+static void sbmac_channel_start(struct sbmac_softc *s);
+static void sbmac_channel_stop(struct sbmac_softc *s);
+static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *,
+						enum sbmac_state);
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc, int onoff);
+static uint64_t sbmac_addr2reg(unsigned char *ptr);
+static irqreturn_t sbmac_intr(int irq, void *dev_instance);
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev);
+static void sbmac_setmulti(struct sbmac_softc *sc);
+static int sbmac_init(struct platform_device *pldev, long long base);
+static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed);
+static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
+			    enum sbmac_fc fc);
+
+static int sbmac_open(struct net_device *dev);
+static void sbmac_tx_timeout(struct net_device *dev);
+static void sbmac_set_rx_mode(struct net_device *dev);
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int sbmac_close(struct net_device *dev);
+static int sbmac_poll(struct napi_struct *napi, int budget);
+
+static void sbmac_mii_poll(struct net_device *dev);
+static int sbmac_mii_probe(struct net_device *dev);
+
+static void sbmac_mii_sync(void __iomem *sbm_mdio);
+static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
+			       int bitcnt);
+static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx);
+static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+			   u16 val);
+
+
+/**********************************************************************
+ *  Globals
+ ********************************************************************* */
+
+static char sbmac_string[] = "sb1250-mac";
+
+static char sbmac_mdio_string[] = "sb1250-mac-mdio";
+
+
+/**********************************************************************
+ *  MDIO constants
+ ********************************************************************* */
+
+#define	MII_COMMAND_START	0x01
+#define	MII_COMMAND_READ	0x02
+#define	MII_COMMAND_WRITE	0x01
+#define	MII_COMMAND_ACK		0x02
+
+#define M_MAC_MDIO_DIR_OUTPUT	0		/* for clarity */
+
+#define ENABLE 		1
+#define DISABLE		0
+
+/**********************************************************************
+ *  SBMAC_MII_SYNC(sbm_mdio)
+ *
+ *  Synchronize with the MII - send a pattern of bits to the MII
+ *  that will guarantee that it is ready to accept a command.
+ *
+ *  Input parameters:
+ *  	   sbm_mdio - address of the MAC's MDIO register
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_mii_sync(void __iomem *sbm_mdio)
+{
+	int cnt;
+	uint64_t bits;
+	int mac_mdio_genc;
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	bits = M_MAC_MDIO_DIR_OUTPUT | M_MAC_MDIO_OUT;
+
+	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
+	for (cnt = 0; cnt < 32; cnt++) {
+		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+	}
+}
+
+/**********************************************************************
+ *  SBMAC_MII_SENDDATA(sbm_mdio, data, bitcnt)
+ *
+ *  Send some bits to the MII.  The bits to be sent are right-
+ *  justified in the 'data' parameter.
+ *
+ *  Input parameters:
+ *  	   sbm_mdio - address of the MAC's MDIO register
+ *  	   data     - data to send
+ *  	   bitcnt   - number of bits to send
+ ********************************************************************* */
+
+static void sbmac_mii_senddata(void __iomem *sbm_mdio, unsigned int data,
+			       int bitcnt)
+{
+	int i;
+	uint64_t bits;
+	unsigned int curmask;
+	int mac_mdio_genc;
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	bits = M_MAC_MDIO_DIR_OUTPUT;
+	__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+
+	curmask = 1 << (bitcnt - 1);
+
+	for (i = 0; i < bitcnt; i++) {
+		if (data & curmask)
+			bits |= M_MAC_MDIO_OUT;
+		else
+			bits &= ~M_MAC_MDIO_OUT;
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | M_MAC_MDC | mac_mdio_genc, sbm_mdio);
+		__raw_writeq(bits | mac_mdio_genc, sbm_mdio);
+		curmask >>= 1;
+	}
+}
+
+
+
+/**********************************************************************
+ *  SBMAC_MII_READ(bus, phyaddr, regidx)
+ *  Read a PHY register.
+ *
+ *  Input parameters:
+ *  	   bus     - MDIO bus handle
+ *  	   phyaddr - PHY's address
+ *  	   regidx  - index of register to read
+ *
+ *  Return value:
+ *  	   value read, or 0xffff if an error occurred.
+ ********************************************************************* */
+
+static int sbmac_mii_read(struct mii_bus *bus, int phyaddr, int regidx)
+{
+	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
+	void __iomem *sbm_mdio = sc->sbm_mdio;
+	int idx;
+	int error;
+	int regval;
+	int mac_mdio_genc;
+
+	/*
+	 * Synchronize ourselves so that the PHY knows the next
+	 * thing coming down is a command
+	 */
+	sbmac_mii_sync(sbm_mdio);
+
+	/*
+	 * Send the data to the PHY.  The sequence is
+	 * a "start" command (2 bits)
+	 * a "read" command (2 bits)
+	 * the PHY addr (5 bits)
+	 * the register index (5 bits)
+	 */
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_READ, 2);
+	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
+	sbmac_mii_senddata(sbm_mdio, regidx, 5);
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	/*
+	 * Switch the port around without a clock transition.
+	 */
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+	/*
+	 * Send out a clock pulse to signal we want the status
+	 */
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+		     sbm_mdio);
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+	/*
+	 * If an error occurred, the PHY will signal '1' back
+	 */
+	error = __raw_readq(sbm_mdio) & M_MAC_MDIO_IN;
+
+	/*
+	 * Issue an 'idle' clock pulse, but keep the direction
+	 * the same.
+	 */
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+		     sbm_mdio);
+	__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+
+	regval = 0;
+
+	for (idx = 0; idx < 16; idx++) {
+		regval <<= 1;
+
+		if (error == 0) {
+			if (__raw_readq(sbm_mdio) & M_MAC_MDIO_IN)
+				regval |= 1;
+		}
+
+		__raw_writeq(M_MAC_MDIO_DIR_INPUT | M_MAC_MDC | mac_mdio_genc,
+			     sbm_mdio);
+		__raw_writeq(M_MAC_MDIO_DIR_INPUT | mac_mdio_genc, sbm_mdio);
+	}
+
+	/* Switch back to output */
+	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
+
+	if (error == 0)
+		return regval;
+	return 0xffff;
+}
+
+
+/**********************************************************************
+ *  SBMAC_MII_WRITE(bus, phyaddr, regidx, regval)
+ *
+ *  Write a value to a PHY register.
+ *
+ *  Input parameters:
+ *  	   bus     - MDIO bus handle
+ *  	   phyaddr - PHY to use
+ *  	   regidx  - register within the PHY
+ *  	   regval  - data to write to register
+ *
+ *  Return value:
+ *  	   0 for success
+ ********************************************************************* */
+
+static int sbmac_mii_write(struct mii_bus *bus, int phyaddr, int regidx,
+			   u16 regval)
+{
+	struct sbmac_softc *sc = (struct sbmac_softc *)bus->priv;
+	void __iomem *sbm_mdio = sc->sbm_mdio;
+	int mac_mdio_genc;
+
+	sbmac_mii_sync(sbm_mdio);
+
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_START, 2);
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_WRITE, 2);
+	sbmac_mii_senddata(sbm_mdio, phyaddr, 5);
+	sbmac_mii_senddata(sbm_mdio, regidx, 5);
+	sbmac_mii_senddata(sbm_mdio, MII_COMMAND_ACK, 2);
+	sbmac_mii_senddata(sbm_mdio, regval, 16);
+
+	mac_mdio_genc = __raw_readq(sbm_mdio) & M_MAC_GENC;
+
+	__raw_writeq(M_MAC_MDIO_DIR_OUTPUT | mac_mdio_genc, sbm_mdio);
+
+	return 0;
+}
+
+
+
+/**********************************************************************
+ *  SBDMA_INITCTX(d,s,chan,txrx,maxdescr)
+ *
+ *  Initialize a DMA channel context.  Since there are potentially
+ *  eight DMA channels per MAC, it's nice to do this in a standard
+ *  way.
+ *
+ *  Input parameters:
+ *  	   d - struct sbmacdma (DMA channel context)
+ *  	   s - struct sbmac_softc (pointer to a MAC)
+ *  	   chan - channel number (0..1 right now)
+ *  	   txrx - Identifies DMA_TX or DMA_RX for channel direction
+ *  	   maxdescr - number of descriptors
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan,
+			  int txrx, int maxdescr)
+{
+#ifdef CONFIG_SBMAC_COALESCE
+	int int_pktcnt, int_timeout;
+#endif
+
+	/*
+	 * Save away interesting stuff in the structure
+	 */
+
+	d->sbdma_eth       = s;
+	d->sbdma_channel   = chan;
+	d->sbdma_txdir     = txrx;
+
+#if 0
+	/* RMON clearing */
+	s->sbe_idx =(s->sbm_base - A_MAC_BASE_0)/MAC_SPACING;
+#endif
+
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BYTES);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_COLLISIONS);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_LATE_COL);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_EX_COL);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_FCS_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_ABORT);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_BAD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_GOOD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_RUNT);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_TX_OVERSIZE);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BYTES);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_MCAST);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BCAST);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_BAD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_GOOD);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_RUNT);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_OVERSIZE);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_FCS_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_LENGTH_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_CODE_ERROR);
+	__raw_writeq(0, s->sbm_base + R_MAC_RMON_RX_ALIGN_ERROR);
+
+	/*
+	 * initialize register pointers
+	 */
+
+	d->sbdma_config0 =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG0);
+	d->sbdma_config1 =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CONFIG1);
+	d->sbdma_dscrbase =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_BASE);
+	d->sbdma_dscrcnt =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_DSCR_CNT);
+	d->sbdma_curdscr =
+		s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_CUR_DSCRADDR);
+	if (d->sbdma_txdir)
+		d->sbdma_oodpktlost = NULL;
+	else
+		d->sbdma_oodpktlost =
+			s->sbm_base + R_MAC_DMA_REGISTER(txrx,chan,R_MAC_DMA_OODPKTLOST_RX);
+
+	/*
+	 * Allocate memory for the ring
+	 */
+
+	d->sbdma_maxdescr = maxdescr;
+
+	d->sbdma_dscrtable_unaligned = kcalloc(d->sbdma_maxdescr + 1,
+					       sizeof(*d->sbdma_dscrtable),
+					       GFP_KERNEL);
+
+	/*
+	 * The descriptor table must be aligned to at least 16 bytes or the
+	 * MAC will corrupt it.
+	 */
+	d->sbdma_dscrtable = (struct sbdmadscr *)
+			     ALIGN((unsigned long)d->sbdma_dscrtable_unaligned,
+				   sizeof(*d->sbdma_dscrtable));
+
+	d->sbdma_dscrtable_end = d->sbdma_dscrtable + d->sbdma_maxdescr;
+
+	d->sbdma_dscrtable_phys = virt_to_phys(d->sbdma_dscrtable);
+
+	/*
+	 * And context table
+	 */
+
+	d->sbdma_ctxtable = kcalloc(d->sbdma_maxdescr,
+				    sizeof(*d->sbdma_ctxtable), GFP_KERNEL);
+
+#ifdef CONFIG_SBMAC_COALESCE
+	/*
+	 * Setup Rx/Tx DMA coalescing defaults
+	 */
+
+	int_pktcnt = (txrx == DMA_TX) ? int_pktcnt_tx : int_pktcnt_rx;
+	if (int_pktcnt)
+		d->sbdma_int_pktcnt = int_pktcnt;
+	else
+		d->sbdma_int_pktcnt = 1;
+
+	int_timeout = (txrx == DMA_TX) ? int_timeout_tx : int_timeout_rx;
+	if (int_timeout)
+		d->sbdma_int_timeout = int_timeout;
+	else
+		d->sbdma_int_timeout = 0;
+#endif
+
+}
+
+/**********************************************************************
+ *  SBDMA_CHANNEL_START(d)
+ *
+ *  Initialize the hardware registers for a DMA channel.
+ *
+ *  Input parameters:
+ *  	   d - DMA channel to init (context must be previously init'd)
+ *  	   rxtx - DMA_RX or DMA_TX depending on the type of channel
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_channel_start(struct sbmacdma *d, int rxtx)
+{
+	/*
+	 * Turn on the DMA channel
+	 */
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(V_DMA_INT_TIMEOUT(d->sbdma_int_timeout) |
+		       0, d->sbdma_config1);
+	__raw_writeq(M_DMA_EOP_INT_EN |
+		       V_DMA_RINGSZ(d->sbdma_maxdescr) |
+		       V_DMA_INT_PKTCNT(d->sbdma_int_pktcnt) |
+		       0, d->sbdma_config0);
+#else
+	__raw_writeq(0, d->sbdma_config1);
+	__raw_writeq(V_DMA_RINGSZ(d->sbdma_maxdescr) |
+		       0, d->sbdma_config0);
+#endif
+
+	__raw_writeq(d->sbdma_dscrtable_phys, d->sbdma_dscrbase);
+
+	/*
+	 * Initialize ring pointers
+	 */
+
+	d->sbdma_addptr = d->sbdma_dscrtable;
+	d->sbdma_remptr = d->sbdma_dscrtable;
+}
+
+/**********************************************************************
+ *  SBDMA_CHANNEL_STOP(d)
+ *
+ *  Shut down the hardware registers for a DMA channel.
+ *
+ *  Input parameters:
+ *  	   d - DMA channel to stop (context must be previously init'd)
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_channel_stop(struct sbmacdma *d)
+{
+	/*
+	 * Turn off the DMA channel
+	 */
+
+	__raw_writeq(0, d->sbdma_config1);
+
+	__raw_writeq(0, d->sbdma_dscrbase);
+
+	__raw_writeq(0, d->sbdma_config0);
+
+	/*
+	 * Zero ring pointers
+	 */
+
+	d->sbdma_addptr = NULL;
+	d->sbdma_remptr = NULL;
+}
+
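+/*
+ * Align skb->data up to a 'power2'-byte boundary, then skew it by
+ * 'offset' bytes (e.g. NET_IP_ALIGN) so that the IP header lands
+ * naturally aligned once the Ethernet header is prepended.
+ */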
+static inline void sbdma_align_skb(struct sk_buff *skb,
+				   unsigned int power2, unsigned int offset)
+{
+	unsigned char *addr = skb->data;
+	unsigned char *newaddr = PTR_ALIGN(addr, power2);
+
+	skb_reserve(skb, newaddr - addr + offset);
+}
+
+
+/**********************************************************************
+ *  SBDMA_ADD_RCVBUFFER(d,sb)
+ *
+ *  Add a buffer to the specified DMA channel.   For receive channels,
+ *  this queues a buffer for inbound packets.
+ *
+ *  Input parameters:
+ *	   sc - softc structure
+ *  	    d - DMA channel descriptor
+ * 	   sb - sk_buff to add, or NULL if we should allocate one
+ *
+ *  Return value:
+ *  	   0 if buffer added successfully
+ *  	   -ENOSPC if the ring is full, -ENOBUFS if allocation failed
+ ********************************************************************* */
+
+
+static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d,
+			       struct sk_buff *sb)
+{
+	struct net_device *dev = sc->sbm_dev;
+	struct sbdmadscr *dsc;
+	struct sbdmadscr *nextdsc;
+	struct sk_buff *sb_new = NULL;
+	int pktsize = ENET_PACKET_SIZE;
+
+	/* get pointer to our current place in the ring */
+
+	dsc = d->sbdma_addptr;
+	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+	/*
+	 * figure out if the ring is full - if the next descriptor
+	 * is the same as the one that we're going to remove from
+	 * the ring, the ring is full
+	 */
+
+	if (nextdsc == d->sbdma_remptr) {
+		return -ENOSPC;
+	}
+
+	/*
+	 * Allocate a sk_buff if we don't already have one.
+	 * If we do have an sk_buff, reset it so that it's empty.
+	 *
+	 * Note: sk_buffs don't seem to be guaranteed to have any sort
+	 * of alignment when they are allocated.  Therefore, allocate enough
+	 * extra space to make sure that:
+	 *
+	 *    1. the data does not start in the middle of a cache line.
+	 *    2. The data does not end in the middle of a cache line
+	 *    3. The buffer can be aligned such that the IP addresses are
+	 *       naturally aligned.
+	 *
+	 *  Remember, the SOCs MAC writes whole cache lines at a time,
+	 *  without reading the old contents first.  So, if the sk_buff's
+	 *  data portion starts in the middle of a cache line, the SOC
+	 *  DMA will trash the beginning (and ending) portions.
+	 */
+
+	if (sb == NULL) {
+		sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE +
+					       SMP_CACHE_BYTES * 2 +
+					       NET_IP_ALIGN);
+		if (sb_new == NULL) {
+			pr_info("%s: sk_buff allocation failed\n",
+			       d->sbdma_eth->sbm_dev->name);
+			return -ENOBUFS;
+		}
+
+		sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN);
+	} else {
+		sb_new = sb;
+		/*
+		 * nothing special to reinit buffer, it's already aligned
+		 * and sb->data already points to a good place.
+		 */
+	}
+
+	/*
+	 * fill in the descriptor
+	 */
+
+#ifdef CONFIG_SBMAC_COALESCE
+	/*
+	 * Do not interrupt per DMA transfer.
+	 */
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
+		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0;
+#else
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
+		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) |
+		M_DMA_DSCRA_INTERRUPT;
+#endif
+
+	/* receiving: no options */
+	dsc->dscr_b = 0;
+
+	/*
+	 * fill in the context
+	 */
+
+	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb_new;
+
+	/*
+	 * point at next packet
+	 */
+
+	d->sbdma_addptr = nextdsc;
+
+	/*
+	 * Give the buffer to the DMA engine.
+	 */
+
+	__raw_writeq(1, d->sbdma_dscrcnt);
+
+	return 0;					/* we did it */
+}
+
+/**********************************************************************
+ *  SBDMA_ADD_TXBUFFER(d,sb)
+ *
+ *  Add a transmit buffer to the specified DMA channel, causing a
+ *  transmit to start.
+ *
+ *  Input parameters:
+ *  	   d - DMA channel descriptor
+ * 	   sb - sk_buff to add
+ *
+ *  Return value:
+ *  	   0 transmit queued successfully
+ *  	   otherwise error code
+ ********************************************************************* */
+
+
+static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *sb)
+{
+	struct sbdmadscr *dsc;
+	struct sbdmadscr *nextdsc;
+	uint64_t phys;
+	uint64_t ncb;
+	int length;
+
+	/* get pointer to our current place in the ring */
+
+	dsc = d->sbdma_addptr;
+	nextdsc = SBDMA_NEXTBUF(d,sbdma_addptr);
+
+	/*
+	 * figure out if the ring is full - if the next descriptor
+	 * is the same as the one that we're going to remove from
+	 * the ring, the ring is full
+	 */
+
+	if (nextdsc == d->sbdma_remptr) {
+		return -ENOSPC;
+	}
+
+	/*
+	 * Under Linux, it's not necessary to copy/coalesce buffers
+	 * like it is on NetBSD.  We think they're all contiguous,
+	 * but that may not be true for GBE.
+	 */
+
+	length = sb->len;
+
+	/*
+	 * fill in the descriptor.  Note that the number of cache
+	 * blocks in the descriptor is the number of blocks
+	 * *spanned*, so we need to add in the offset (if any)
+	 * while doing the calculation.
+	 */
+
+	phys = virt_to_phys(sb->data);
+	ncb = NUMCACHEBLKS(length+(phys & (SMP_CACHE_BYTES - 1)));
+
+	dsc->dscr_a = phys |
+		V_DMA_DSCRA_A_SIZE(ncb) |
+#ifndef CONFIG_SBMAC_COALESCE
+		M_DMA_DSCRA_INTERRUPT |
+#endif
+		M_DMA_ETHTX_SOP;
+
+	/* transmitting: set outbound options and length */
+
+	dsc->dscr_b = V_DMA_DSCRB_OPTIONS(K_DMA_ETHTX_APPENDCRC_APPENDPAD) |
+		V_DMA_DSCRB_PKT_SIZE(length);
+
+	/*
+	 * fill in the context
+	 */
+
+	d->sbdma_ctxtable[dsc-d->sbdma_dscrtable] = sb;
+
+	/*
+	 * point at next packet
+	 */
+
+	d->sbdma_addptr = nextdsc;
+
+	/*
+	 * Give the buffer to the DMA engine.
+	 */
+
+	__raw_writeq(1, d->sbdma_dscrcnt);
+
+	return 0;					/* we did it */
+}
+
+
+
+
+/**********************************************************************
+ *  SBDMA_EMPTYRING(d)
+ *
+ *  Free all allocated sk_buffs on the specified DMA channel;
+ *
+ *  Input parameters:
+ *  	   d  - DMA channel
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_emptyring(struct sbmacdma *d)
+{
+	int idx;
+	struct sk_buff *sb;
+
+	for (idx = 0; idx < d->sbdma_maxdescr; idx++) {
+		sb = d->sbdma_ctxtable[idx];
+		if (sb) {
+			dev_kfree_skb(sb);
+			d->sbdma_ctxtable[idx] = NULL;
+		}
+	}
+}
+
+
+/**********************************************************************
+ *  SBDMA_FILLRING(d)
+ *
+ *  Fill the specified DMA channel (must be receive channel)
+ *  with sk_buffs
+ *
+ *  Input parameters:
+ *	   sc - softc structure
+ *  	    d - DMA channel
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d)
+{
+	int idx;
+
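+	/*
+	 * Fill all but one descriptor: the ring is treated as full when
+	 * the next add pointer would catch the remove pointer, so one
+	 * slot always stays unused.
+	 */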
+	for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) {
+		if (sbdma_add_rcvbuffer(sc, d, NULL) != 0)
+			break;
+	}
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sbmac_netpoll(struct net_device *netdev)
+{
+	struct sbmac_softc *sc = netdev_priv(netdev);
+	int irq = sc->sbm_dev->irq;
+
+	__raw_writeq(0, sc->sbm_imr);
+
+	sbmac_intr(irq, netdev);
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+	((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+	sc->sbm_imr);
+#else
+	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+	(M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+}
+#endif
+
+/**********************************************************************
+ *  SBDMA_RX_PROCESS(sc,d,work_to_do,poll)
+ *
+ *  Process "completed" receive buffers on the specified DMA channel.
+ *
+ *  Input parameters:
+ *            sc - softc structure
+ *  	       d - DMA channel context
+ *    work_to_do - no. of packets to process before enabling interrupt
+ *                 again (for NAPI)
+ *          poll - 1: using polling (for NAPI)
+ *
+ *  Return value:
+ *  	   number of packets processed
+ ********************************************************************* */
+
+static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			    int work_to_do, int poll)
+{
+	struct net_device *dev = sc->sbm_dev;
+	int curidx;
+	int hwidx;
+	struct sbdmadscr *dsc;
+	struct sk_buff *sb;
+	int len;
+	int work_done = 0;
+	int dropped = 0;
+
+	prefetch(d);
+
+again:
+	/* Check if the HW dropped any frames */
+	dev->stats.rx_fifo_errors
+	    += __raw_readq(sc->sbm_rxdma.sbdma_oodpktlost) & 0xffff;
+	__raw_writeq(0, sc->sbm_rxdma.sbdma_oodpktlost);
+
+	while (work_to_do-- > 0) {
+		/*
+		 * figure out where we are (as an index) and where
+		 * the hardware is (also as an index)
+		 *
+		 * This could be done faster if (for example) the
+		 * descriptor table was page-aligned and contiguous in
+		 * both virtual and physical memory -- you could then
+		 * just compare the low-order bits of the virtual address
+		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+		 */
+
+		dsc = d->sbdma_remptr;
+		curidx = dsc - d->sbdma_dscrtable;
+
+		prefetch(dsc);
+		prefetch(&d->sbdma_ctxtable[curidx]);
+
+		hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+			 d->sbdma_dscrtable_phys) /
+			sizeof(*d->sbdma_dscrtable);
+
+		/*
+		 * If they're the same, that means we've processed all
+		 * of the descriptors up to (but not including) the one that
+		 * the hardware is working on right now.
+		 */
+
+		if (curidx == hwidx)
+			goto done;
+
+		/*
+		 * Otherwise, get the packet's sk_buff ptr back
+		 */
+
+		sb = d->sbdma_ctxtable[curidx];
+		d->sbdma_ctxtable[curidx] = NULL;
+
+		len = (int)G_DMA_DSCRB_PKT_SIZE(dsc->dscr_b) - 4;
+
+		/*
+		 * Check packet status.  If good, process it.
+		 * If not, silently drop it and put it back on the
+		 * receive ring.
+		 */
+
+		if (likely (!(dsc->dscr_a & M_DMA_ETHRX_BAD))) {
+
+			/*
+			 * Add a new buffer to replace the old one.  If we fail
+			 * to allocate a buffer, we're going to drop this
+			 * packet and put it right back on the receive ring.
+			 */
+
+			if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) ==
+				     -ENOBUFS)) {
+				dev->stats.rx_dropped++;
+				/* Re-add old buffer */
+				sbdma_add_rcvbuffer(sc, d, sb);
+				/* No point in continuing at the moment */
+				printk(KERN_ERR "%s: dropped packet (out of rx buffers)\n", dev->name);
+				d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+				goto done;
+			} else {
+				/*
+				 * Set length into the packet
+				 */
+				skb_put(sb,len);
+
+				/*
+				 * Buffer has been replaced on the
+				 * receive ring.  Pass the buffer to
+				 * the kernel
+				 */
+				sb->protocol = eth_type_trans(sb,d->sbdma_eth->sbm_dev);
+				/* Check hw IPv4/TCP checksum if supported */
+				if (sc->rx_hw_checksum == ENABLE) {
+					if (!((dsc->dscr_a) & M_DMA_ETHRX_BADIP4CS) &&
+					    !((dsc->dscr_a) & M_DMA_ETHRX_BADTCPCS)) {
+						sb->ip_summed = CHECKSUM_UNNECESSARY;
+						/* don't need to set sb->csum */
+					} else {
+						skb_checksum_none_assert(sb);
+					}
+				}
+				prefetch(sb->data);
+				prefetch((const void *)(((char *)sb->data)+32));
+				if (poll)
+					dropped = netif_receive_skb(sb);
+				else
+					dropped = netif_rx(sb);
+
+				if (dropped == NET_RX_DROP) {
+					dev->stats.rx_dropped++;
+					d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+					goto done;
+				}
+				else {
+					dev->stats.rx_bytes += len;
+					dev->stats.rx_packets++;
+				}
+			}
+		} else {
+			/*
+			 * Packet was mangled somehow.  Just drop it and
+			 * put it back on the receive ring.
+			 */
+			dev->stats.rx_errors++;
+			sbdma_add_rcvbuffer(sc, d, sb);
+		}
+
+
+		/*
+		 * .. and advance to the next buffer.
+		 */
+
+		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+		work_done++;
+	}
+	if (!poll) {
+		work_to_do = 32;
+		goto again; /* drain frames received meanwhile and re-read the FIFO drop counter */
+	}
+done:
+	return work_done;
+}
+
+/**********************************************************************
+ *  SBDMA_TX_PROCESS(sc,d,poll)
+ *
+ *  Process "completed" transmit buffers on the specified DMA channel.
+ *  This is normally called within the interrupt service routine.
+ *  Note that this isn't really ideal for priority channels, since
+ *  it processes all of the packets on a given channel before
+ *  returning.
+ *
+ *  Input parameters:
+ *      sc - softc structure
+ *  	 d - DMA channel context
+ *    poll - 1: using polling (for NAPI)
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d,
+			     int poll)
+{
+	struct net_device *dev = sc->sbm_dev;
+	int curidx;
+	int hwidx;
+	struct sbdmadscr *dsc;
+	struct sk_buff *sb;
+	unsigned long flags;
+	int packets_handled = 0;
+
+	spin_lock_irqsave(&(sc->sbm_lock), flags);
+
+	if (d->sbdma_remptr == d->sbdma_addptr)
+		goto end_unlock;
+
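+	/*
+	 * Read the hardware's current descriptor index once up front;
+	 * the loop below reaps every buffer that had completed by then.
+	 */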
+	hwidx = ((__raw_readq(d->sbdma_curdscr) & M_DMA_CURDSCR_ADDR) -
+		 d->sbdma_dscrtable_phys) / sizeof(*d->sbdma_dscrtable);
+
+	for (;;) {
+		/*
+		 * figure out where we are (as an index) and where
+		 * the hardware is (also as an index)
+		 *
+		 * This could be done faster if (for example) the
+		 * descriptor table was page-aligned and contiguous in
+		 * both virtual and physical memory -- you could then
+		 * just compare the low-order bits of the virtual address
+		 * (sbdma_remptr) and the physical address (sbdma_curdscr CSR)
+		 */
+
+		curidx = d->sbdma_remptr - d->sbdma_dscrtable;
+
+		/*
+		 * If they're the same, that means we've processed all
+		 * of the descriptors up to (but not including) the one that
+		 * the hardware is working on right now.
+		 */
+
+		if (curidx == hwidx)
+			break;
+
+		/*
+		 * Otherwise, get the packet's sk_buff ptr back
+		 */
+
+		dsc = &(d->sbdma_dscrtable[curidx]);
+		sb = d->sbdma_ctxtable[curidx];
+		d->sbdma_ctxtable[curidx] = NULL;
+
+		/*
+		 * Stats
+		 */
+
+		dev->stats.tx_bytes += sb->len;
+		dev->stats.tx_packets++;
+
+		/*
+		 * for transmits, we just free buffers.
+		 */
+
+		dev_kfree_skb_irq(sb);
+
+		/*
+		 * .. and advance to the next buffer.
+		 */
+
+		d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr);
+
+		packets_handled++;
+
+	}
+
+	/*
+	 * Decide if we should wake up the protocol or not.
+	 * Other drivers seem to do this when we reach a low
+	 * watermark on the transmit queue.
+	 */
+
+	if (packets_handled)
+		netif_wake_queue(d->sbdma_eth->sbm_dev);
+
+end_unlock:
+	spin_unlock_irqrestore(&(sc->sbm_lock), flags);
+
+}
+
+
+
+/**********************************************************************
+ *  SBMAC_INITCTX(s)
+ *
+ *  Initialize an Ethernet context structure - this is called
+ *  once per MAC on the 1250.  Memory is allocated here, so don't
+ *  call it again from inside the ioctl routines that bring the
+ *  interface up/down
+ *
+ *  Input parameters:
+ *  	   s - sbmac context structure
+ *
+ *  Return value:
+ *  	   0
+ ********************************************************************* */
+
+static int sbmac_initctx(struct sbmac_softc *s)
+{
+
+	/*
+	 * figure out the addresses of some ports
+	 */
+
+	s->sbm_macenable = s->sbm_base + R_MAC_ENABLE;
+	s->sbm_maccfg    = s->sbm_base + R_MAC_CFG;
+	s->sbm_fifocfg   = s->sbm_base + R_MAC_THRSH_CFG;
+	s->sbm_framecfg  = s->sbm_base + R_MAC_FRAMECFG;
+	s->sbm_rxfilter  = s->sbm_base + R_MAC_ADFILTER_CFG;
+	s->sbm_isr       = s->sbm_base + R_MAC_STATUS;
+	s->sbm_imr       = s->sbm_base + R_MAC_INT_MASK;
+	s->sbm_mdio      = s->sbm_base + R_MAC_MDIO;
+
+	/*
+	 * Initialize the DMA channels.  Right now, only one per MAC is used
+	 * Note: Only do this _once_, as it allocates memory from the kernel!
+	 */
+
+	sbdma_initctx(&(s->sbm_txdma),s,0,DMA_TX,SBMAC_MAX_TXDESCR);
+	sbdma_initctx(&(s->sbm_rxdma),s,0,DMA_RX,SBMAC_MAX_RXDESCR);
+
+	/*
+	 * initial state is OFF
+	 */
+
+	s->sbm_state = sbmac_state_off;
+
+	return 0;
+}
+
+
+static void sbdma_uninitctx(struct sbmacdma *d)
+{
+	if (d->sbdma_dscrtable_unaligned) {
+		kfree(d->sbdma_dscrtable_unaligned);
+		d->sbdma_dscrtable_unaligned = d->sbdma_dscrtable = NULL;
+	}
+
+	if (d->sbdma_ctxtable) {
+		kfree(d->sbdma_ctxtable);
+		d->sbdma_ctxtable = NULL;
+	}
+}
+
+
+static void sbmac_uninitctx(struct sbmac_softc *sc)
+{
+	sbdma_uninitctx(&(sc->sbm_txdma));
+	sbdma_uninitctx(&(sc->sbm_rxdma));
+}
+
+
+/**********************************************************************
+ *  SBMAC_CHANNEL_START(s)
+ *
+ *  Start packet processing on this MAC.
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_channel_start(struct sbmac_softc *s)
+{
+	uint64_t reg;
+	void __iomem *port;
+	uint64_t cfg,fifo,framecfg;
+	int idx, th_value;
+
+	/*
+	 * Don't do this if running
+	 */
+
+	if (s->sbm_state == sbmac_state_on)
+		return;
+
+	/*
+	 * Bring the controller out of reset, but leave it off.
+	 */
+
+	__raw_writeq(0, s->sbm_macenable);
+
+	/*
+	 * Ignore all received packets
+	 */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+
+	/*
+	 * Calculate values for various control registers.
+	 */
+
+	cfg = M_MAC_RETRY_EN |
+		M_MAC_TX_HOLD_SOP_EN |
+		V_MAC_TX_PAUSE_CNT_16K |
+		M_MAC_AP_STAT_EN |
+		M_MAC_FAST_SYNC |
+		M_MAC_SS_EN |
+		0;
+
+	/*
+	 * Be sure that RD_THRSH + WR_THRSH <= 32 for pass1 parts,
+	 * and that RD_THRSH + WR_THRSH <= 128 for pass2 and above.
+	 * Use a larger RD_THRSH for gigabit.
+	 */
+	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2)
+		th_value = 28;
+	else
+		th_value = 64;
+
+	fifo = V_MAC_TX_WR_THRSH(4) |	/* Must be '4' or '8' */
+		((s->sbm_speed == sbmac_speed_1000)
+		 ? V_MAC_TX_RD_THRSH(th_value) : V_MAC_TX_RD_THRSH(4)) |
+		V_MAC_TX_RL_THRSH(4) |
+		V_MAC_RX_PL_THRSH(4) |
+		V_MAC_RX_RD_THRSH(4) |	/* Must be '4' */
+		V_MAC_RX_RL_THRSH(8) |
+		0;
+
+	framecfg = V_MAC_MIN_FRAMESZ_DEFAULT |
+		V_MAC_MAX_FRAMESZ_DEFAULT |
+		V_MAC_BACKOFF_SEL(1);
+
+	/*
+	 * Clear out the hash address map
+	 */
+
+	port = s->sbm_base + R_MAC_HASH_BASE;
+	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+	/*
+	 * Clear out the exact-match table
+	 */
+
+	port = s->sbm_base + R_MAC_ADDR_BASE;
+	for (idx = 0; idx < MAC_ADDR_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+	/*
+	 * Clear out the DMA Channel mapping table registers
+	 */
+
+	port = s->sbm_base + R_MAC_CHUP0_BASE;
+	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+
+	port = s->sbm_base + R_MAC_CHLO0_BASE;
+	for (idx = 0; idx < MAC_CHMAP_COUNT; idx++) {
+		__raw_writeq(0, port);
+		port += sizeof(uint64_t);
+	}
+
+	/*
+	 * Program the hardware address.  It goes into the hardware-address
+	 * register as well as the first filter register.
+	 */
+
+	reg = sbmac_addr2reg(s->sbm_hwaddr);
+
+	port = s->sbm_base + R_MAC_ADDR_BASE;
+	__raw_writeq(reg, port);
+	port = s->sbm_base + R_MAC_ETHERNET_ADDR;
+
+#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
+	/*
+	 * Pass1 SOCs do not receive packets addressed to the
+	 * destination address in the R_MAC_ETHERNET_ADDR register.
+	 * Set the value to zero.
+	 */
+	__raw_writeq(0, port);
+#else
+	__raw_writeq(reg, port);
+#endif
+
+	/*
+	 * Set the receive filter for no packets, and write values
+	 * to the various config registers
+	 */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+	__raw_writeq(0, s->sbm_imr);
+	__raw_writeq(framecfg, s->sbm_framecfg);
+	__raw_writeq(fifo, s->sbm_fifocfg);
+	__raw_writeq(cfg, s->sbm_maccfg);
+
+	/*
+	 * Initialize DMA channels (rings should be ok now)
+	 */
+
+	sbdma_channel_start(&(s->sbm_rxdma), DMA_RX);
+	sbdma_channel_start(&(s->sbm_txdma), DMA_TX);
+
+	/*
+	 * Configure the speed, duplex, and flow control
+	 */
+
+	sbmac_set_speed(s,s->sbm_speed);
+	sbmac_set_duplex(s,s->sbm_duplex,s->sbm_fc);
+
+	/*
+	 * Fill the receive ring
+	 */
+
+	sbdma_fillring(s, &(s->sbm_rxdma));
+
+	/*
+	 * Turn on the rest of the bits in the enable register
+	 */
+
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+	__raw_writeq(M_MAC_RXDMA_EN0 |
+		       M_MAC_TXDMA_EN0, s->sbm_macenable);
+#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
+	__raw_writeq(M_MAC_RXDMA_EN0 |
+		       M_MAC_TXDMA_EN0 |
+		       M_MAC_RX_ENABLE |
+		       M_MAC_TX_ENABLE, s->sbm_macenable);
+#else
+#error invalid SiByte MAC configuration
+#endif
+
+#ifdef CONFIG_SBMAC_COALESCE
+	__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+		       ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0), s->sbm_imr);
+#else
+	__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+		       (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), s->sbm_imr);
+#endif
+
+	/*
+	 * Enable receiving unicasts and broadcasts
+	 */
+
+	__raw_writeq(M_MAC_UCAST_EN | M_MAC_BCAST_EN, s->sbm_rxfilter);
+
+	/*
+	 * we're running now.
+	 */
+
+	s->sbm_state = sbmac_state_on;
+
+	/*
+	 * Program multicast addresses
+	 */
+
+	sbmac_setmulti(s);
+
+	/*
+	 * If channel was in promiscuous mode before, turn that on
+	 */
+
+	if (s->sbm_devflags & IFF_PROMISC) {
+		sbmac_promiscuous_mode(s,1);
+	}
+
+}
+
+
+/**********************************************************************
+ *  SBMAC_CHANNEL_STOP(s)
+ *
+ *  Stop packet processing on this MAC.
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_channel_stop(struct sbmac_softc *s)
+{
+	/* don't do this if already stopped */
+
+	if (s->sbm_state == sbmac_state_off)
+		return;
+
+	/* don't accept any packets, disable all interrupts */
+
+	__raw_writeq(0, s->sbm_rxfilter);
+	__raw_writeq(0, s->sbm_imr);
+
+	/* Turn off ticker */
+
+	/* XXX */
+
+	/* turn off receiver and transmitter */
+
+	__raw_writeq(0, s->sbm_macenable);
+
+	/* We're stopped now. */
+
+	s->sbm_state = sbmac_state_off;
+
+	/*
+	 * Stop DMA channels (rings should be ok now)
+	 */
+
+	sbdma_channel_stop(&(s->sbm_rxdma));
+	sbdma_channel_stop(&(s->sbm_txdma));
+
+	/* Empty the receive and transmit rings */
+
+	sbdma_emptyring(&(s->sbm_rxdma));
+	sbdma_emptyring(&(s->sbm_txdma));
+
+}
+
+/**********************************************************************
+ *  SBMAC_SET_CHANNEL_STATE(state)
+ *
+ *  Set the channel's state ON or OFF
+ *
+ *  Input parameters:
+ *  	   state - new state
+ *
+ *  Return value:
+ *  	   old state
+ ********************************************************************* */
+static enum sbmac_state sbmac_set_channel_state(struct sbmac_softc *sc,
+						enum sbmac_state state)
+{
+	enum sbmac_state oldstate = sc->sbm_state;
+
+	/*
+	 * If same as previous state, return
+	 */
+
+	if (state == oldstate) {
+		return oldstate;
+	}
+
+	/*
+	 * If new state is ON, turn channel on
+	 */
+
+	if (state == sbmac_state_on) {
+		sbmac_channel_start(sc);
+	}
+	else {
+		sbmac_channel_stop(sc);
+	}
+
+	/*
+	 * Return previous state
+	 */
+
+	return oldstate;
+}
+
+
+/**********************************************************************
+ *  SBMAC_PROMISCUOUS_MODE(sc,onoff)
+ *
+ *  Turn on or off promiscuous mode
+ *
+ *  Input parameters:
+ *  	   sc - softc
+ *      onoff - 1 to turn on, 0 to turn off
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_promiscuous_mode(struct sbmac_softc *sc,int onoff)
+{
+	uint64_t reg;
+
+	if (sc->sbm_state != sbmac_state_on)
+		return;
+
+	if (onoff) {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= M_MAC_ALLPKT_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+	else {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg &= ~M_MAC_ALLPKT_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+}
+
+/**********************************************************************
+ *  SBMAC_SET_IPHDR_OFFSET(sc)
+ *
+ *  Set the IP header offset to 15, assuming Ethernet encapsulation
+ *
+ *  Input parameters:
+ *  	   sc - softc
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_set_iphdr_offset(struct sbmac_softc *sc)
+{
+	uint64_t reg;
+
+	/* Hard-code the offset to 15 for now */
+	reg = __raw_readq(sc->sbm_rxfilter);
+	reg &= ~M_MAC_IPHDR_OFFSET;
+	reg |= V_MAC_IPHDR_OFFSET(15);
+	__raw_writeq(reg, sc->sbm_rxfilter);
+
+	/* BCM1250 pass1 didn't have hardware checksum.  Everything
+	   later does.  */
+	if (soc_type == K_SYS_SOC_TYPE_BCM1250 && periph_rev < 2) {
+		sc->rx_hw_checksum = DISABLE;
+	} else {
+		sc->rx_hw_checksum = ENABLE;
+	}
+}
+
+
+/**********************************************************************
+ *  SBMAC_ADDR2REG(ptr)
+ *
+ *  Convert six bytes into the 64-bit register value that
+ *  we typically write into the SBMAC's address/mcast registers
+ *
+ *  Input parameters:
+ *  	   ptr - pointer to 6 bytes
+ *
+ *  Return value:
+ *  	   register value
+ ********************************************************************* */
+
+static uint64_t sbmac_addr2reg(unsigned char *ptr)
+{
+	uint64_t reg = 0;
+
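+	/*
+	 * Walk the address backwards so that the first octet of the
+	 * MAC address lands in the least-significant byte of the register.
+	 */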
+	ptr += 6;
+
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+	reg <<= 8;
+	reg |= (uint64_t) *(--ptr);
+
+	return reg;
+}
+
+
+/**********************************************************************
+ *  SBMAC_SET_SPEED(s,speed)
+ *
+ *  Configure LAN speed for the specified MAC.
+ *  Warning: must be called when MAC is off!
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *  	   speed - speed to set MAC to (see enum sbmac_speed)
+ *
+ *  Return value:
+ *  	   1 if successful
+ *  	   0 if an invalid speed was specified
+ ********************************************************************* */
+
+static int sbmac_set_speed(struct sbmac_softc *s, enum sbmac_speed speed)
+{
+	uint64_t cfg;
+	uint64_t framecfg;
+
+	/*
+	 * Save new current values
+	 */
+
+	s->sbm_speed = speed;
+
+	if (s->sbm_state == sbmac_state_on)
+		return 0;	/* save for next restart */
+
+	/*
+	 * Read current register values
+	 */
+
+	cfg = __raw_readq(s->sbm_maccfg);
+	framecfg = __raw_readq(s->sbm_framecfg);
+
+	/*
+	 * Mask out the stuff we want to change
+	 */
+
+	cfg &= ~(M_MAC_BURST_EN | M_MAC_SPEED_SEL);
+	framecfg &= ~(M_MAC_IFG_RX | M_MAC_IFG_TX | M_MAC_IFG_THRSH |
+		      M_MAC_SLOT_SIZE);
+
+	/*
+	 * Now add in the new bits
+	 */
+
+	switch (speed) {
+	case sbmac_speed_10:
+		framecfg |= V_MAC_IFG_RX_10 |
+			V_MAC_IFG_TX_10 |
+			K_MAC_IFG_THRSH_10 |
+			V_MAC_SLOT_SIZE_10;
+		cfg |= V_MAC_SPEED_SEL_10MBPS;
+		break;
+
+	case sbmac_speed_100:
+		framecfg |= V_MAC_IFG_RX_100 |
+			V_MAC_IFG_TX_100 |
+			V_MAC_IFG_THRSH_100 |
+			V_MAC_SLOT_SIZE_100;
+		cfg |= V_MAC_SPEED_SEL_100MBPS ;
+		break;
+
+	case sbmac_speed_1000:
+		framecfg |= V_MAC_IFG_RX_1000 |
+			V_MAC_IFG_TX_1000 |
+			V_MAC_IFG_THRSH_1000 |
+			V_MAC_SLOT_SIZE_1000;
+		cfg |= V_MAC_SPEED_SEL_1000MBPS | M_MAC_BURST_EN;
+		break;
+
+	default:
+		return 0;
+	}
+
+	/*
+	 * Send the bits back to the hardware
+	 */
+
+	__raw_writeq(framecfg, s->sbm_framecfg);
+	__raw_writeq(cfg, s->sbm_maccfg);
+
+	return 1;
+}
+
+/**********************************************************************
+ *  SBMAC_SET_DUPLEX(s,duplex,fc)
+ *
+ *  Set Ethernet duplex and flow control options for this MAC
+ *  Warning: must be called when MAC is off!
+ *
+ *  Input parameters:
+ *  	   s - sbmac structure
+ *  	   duplex - duplex setting (see enum sbmac_duplex)
+ *  	   fc - flow control setting (see enum sbmac_fc)
+ *
+ *  Return value:
+ *  	   1 if ok
+ *  	   0 if an invalid parameter combination was specified
+ ********************************************************************* */
+
+static int sbmac_set_duplex(struct sbmac_softc *s, enum sbmac_duplex duplex,
+			    enum sbmac_fc fc)
+{
+	uint64_t cfg;
+
+	/*
+	 * Save new current values
+	 */
+
+	s->sbm_duplex = duplex;
+	s->sbm_fc = fc;
+
+	if (s->sbm_state == sbmac_state_on)
+		return 0;	/* save for next restart */
+
+	/*
+	 * Read current register values
+	 */
+
+	cfg = __raw_readq(s->sbm_maccfg);
+
+	/*
+	 * Mask off the stuff we're about to change
+	 */
+
+	cfg &= ~(M_MAC_FC_SEL | M_MAC_FC_CMD | M_MAC_HDX_EN);
+
+
+	switch (duplex) {
+	case sbmac_duplex_half:
+		switch (fc) {
+		case sbmac_fc_disabled:
+			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_DISABLED;
+			break;
+
+		case sbmac_fc_collision:
+			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENABLED;
+			break;
+
+		case sbmac_fc_carrier:
+			cfg |= M_MAC_HDX_EN | V_MAC_FC_CMD_ENAB_FALSECARR;
+			break;
+
+		case sbmac_fc_frame:		/* not valid in half duplex */
+		default:			/* invalid selection */
+			return 0;
+		}
+		break;
+
+	case sbmac_duplex_full:
+		switch (fc) {
+		case sbmac_fc_disabled:
+			cfg |= V_MAC_FC_CMD_DISABLED;
+			break;
+
+		case sbmac_fc_frame:
+			cfg |= V_MAC_FC_CMD_ENABLED;
+			break;
+
+		case sbmac_fc_collision:	/* not valid in full duplex */
+		case sbmac_fc_carrier:		/* not valid in full duplex */
+		default:
+			return 0;
+		}
+		break;
+	default:
+		return 0;
+	}
+
+	/*
+	 * Send the bits back to the hardware
+	 */
+
+	__raw_writeq(cfg, s->sbm_maccfg);
+
+	return 1;
+}
+
+
+
+
+/**********************************************************************
+ *  SBMAC_INTR()
+ *
+ *  Interrupt handler for MAC interrupts
+ *
+ *  Input parameters:
+ *  	   irq - interrupt number
+ *  	   dev_instance - net_device for this MAC
+ *
+ *  Return value:
+ *  	   IRQ_HANDLED if the MAC raised the interrupt, IRQ_NONE otherwise
+ ********************************************************************* */
+static irqreturn_t sbmac_intr(int irq,void *dev_instance)
+{
+	struct net_device *dev = (struct net_device *) dev_instance;
+	struct sbmac_softc *sc = netdev_priv(dev);
+	uint64_t isr;
+	int handled = 0;
+
+	/*
+	 * Read the ISR (this clears the bits in the real
+	 * register, except for counter addr)
+	 */
+
+	isr = __raw_readq(sc->sbm_isr) & ~M_MAC_COUNTER_ADDR;
+
+	if (isr == 0)
+		return IRQ_RETVAL(0);
+	handled = 1;
+
+	/*
+	 * Transmits on channel 0
+	 */
+
+	if (isr & (M_MAC_INT_CHANNEL << S_MAC_TX_CH0))
+		sbdma_tx_process(sc,&(sc->sbm_txdma), 0);
+
+	if (isr & (M_MAC_INT_CHANNEL << S_MAC_RX_CH0)) {
+		if (napi_schedule_prep(&sc->napi)) {
+			__raw_writeq(0, sc->sbm_imr);
+			__napi_schedule(&sc->napi);
+			/* Depend on the exit from poll to reenable intr */
+		}
+		else {
+			/* may leave some packets behind */
+			sbdma_rx_process(sc,&(sc->sbm_rxdma),
+					 SBMAC_MAX_RXDESCR * 2, 0);
+		}
+	}
+	return IRQ_RETVAL(handled);
+}
+
+/**********************************************************************
+ *  SBMAC_START_TX(skb,dev)
+ *
+ *  Start output on the specified interface.  Basically, we
+ *  queue as many buffers as we can until the ring fills up, or
+ *  we run off the end of the queue, whichever comes first.
+ *
+ *  Input parameters:
+ *  	   skb - packet to transmit
+ *  	   dev - interface to transmit it on
+ *
+ *  Return value:
+ *  	   NETDEV_TX_OK or NETDEV_TX_BUSY
+ ********************************************************************* */
+static int sbmac_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned long flags;
+
+	/* lock eth irq */
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+
+	/*
+	 * Put the buffer on the transmit ring.  If we
+	 * don't have room, stop the queue.
+	 */
+
+	if (sbdma_add_txbuffer(&(sc->sbm_txdma),skb)) {
+		/* XXX save skb that we could not send */
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+		return NETDEV_TX_BUSY;
+	}
+
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+	return NETDEV_TX_OK;
+}
+
+/**********************************************************************
+ *  SBMAC_SETMULTI(sc)
+ *
+ *  Reprogram the multicast table into the hardware, given
+ *  the list of multicasts associated with the interface
+ *  structure.
+ *
+ *  Input parameters:
+ *  	   sc - softc
+ *
+ *  Return value:
+ *  	   nothing
+ ********************************************************************* */
+
+static void sbmac_setmulti(struct sbmac_softc *sc)
+{
+	uint64_t reg;
+	void __iomem *port;
+	int idx;
+	struct netdev_hw_addr *ha;
+	struct net_device *dev = sc->sbm_dev;
+
+	/*
+	 * Clear out entire multicast table.  We do this by nuking
+	 * the entire hash table and all the direct matches except
+	 * the first one, which is used for our station address
+	 */
+
+	for (idx = 1; idx < MAC_ADDR_COUNT; idx++) {
+		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx*sizeof(uint64_t));
+		__raw_writeq(0, port);
+	}
+
+	for (idx = 0; idx < MAC_HASH_COUNT; idx++) {
+		port = sc->sbm_base + R_MAC_HASH_BASE+(idx*sizeof(uint64_t));
+		__raw_writeq(0, port);
+	}
+
+	/*
+	 * Clear the filter to say we don't want any multicasts.
+	 */
+
+	reg = __raw_readq(sc->sbm_rxfilter);
+	reg &= ~(M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+	__raw_writeq(reg, sc->sbm_rxfilter);
+
+	if (dev->flags & IFF_ALLMULTI) {
+		/*
+		 * Enable ALL multicasts.  Do this by inverting the
+		 * multicast enable bit.
+		 */
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= (M_MAC_MCAST_INV | M_MAC_MCAST_EN);
+		__raw_writeq(reg, sc->sbm_rxfilter);
+		return;
+	}
+
+
+	/*
+	 * Program new multicast entries.  For now, only use the
+	 * perfect filter.  In the future we'll need to use the
+	 * hash filter if the perfect filter overflows
+	 */
+
+	idx = 1;		/* skip station address */
+	netdev_for_each_mc_addr(ha, dev) {
+		if (idx == MAC_ADDR_COUNT)
+			break;
+		reg = sbmac_addr2reg(ha->addr);
+		port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
+		__raw_writeq(reg, port);
+		idx++;
+	}
+
+	/*
+	 * Enable the "accept multicast bits" if we programmed at least one
+	 * multicast.
+	 */
+
+	if (idx > 1) {
+		reg = __raw_readq(sc->sbm_rxfilter);
+		reg |= M_MAC_MCAST_EN;
+		__raw_writeq(reg, sc->sbm_rxfilter);
+	}
+}
+
+static int sb1250_change_mtu(struct net_device *_dev, int new_mtu)
+{
+	if (new_mtu > ENET_PACKET_SIZE)
+		return -EINVAL;
+	_dev->mtu = new_mtu;
+	pr_info("changing the mtu to %d\n", new_mtu);
+	return 0;
+}
+
+static const struct net_device_ops sbmac_netdev_ops = {
+	.ndo_open		= sbmac_open,
+	.ndo_stop		= sbmac_close,
+	.ndo_start_xmit		= sbmac_start_tx,
+	.ndo_set_multicast_list	= sbmac_set_rx_mode,
+	.ndo_tx_timeout		= sbmac_tx_timeout,
+	.ndo_do_ioctl		= sbmac_mii_ioctl,
+	.ndo_change_mtu		= sb1250_change_mtu,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_mac_address	= eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= sbmac_netpoll,
+#endif
+};
+
+/**********************************************************************
+ *  SBMAC_INIT(dev)
+ *
+ *  Attach routine - init hardware and hook ourselves into linux
+ *
+ *  Input parameters:
+ *  	   dev - net_device structure
+ *
+ *  Return value:
+ *  	   status
+ ********************************************************************* */
+
+static int sbmac_init(struct platform_device *pldev, long long base)
+{
+	struct net_device *dev = dev_get_drvdata(&pldev->dev);
+	int idx = pldev->id;
+	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned char *eaddr;
+	uint64_t ea_reg;
+	int i;
+	int err;
+
+	sc->sbm_dev = dev;
+	sc->sbe_idx = idx;
+
+	eaddr = sc->sbm_hwaddr;
+
+	/*
+	 * Read the ethernet address.  The firmware left this programmed
+	 * for us in the ethernet address register for each mac.
+	 */
+
+	ea_reg = __raw_readq(sc->sbm_base + R_MAC_ETHERNET_ADDR);
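+	/*
+	 * Clear the register now that the address has been read;
+	 * sbmac_probe() treats a zero value here as "MAC not in use".
+	 */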
+	__raw_writeq(0, sc->sbm_base + R_MAC_ETHERNET_ADDR);
+	for (i = 0; i < 6; i++) {
+		eaddr[i] = (uint8_t) (ea_reg & 0xFF);
+		ea_reg >>= 8;
+	}
+
+	for (i = 0; i < 6; i++) {
+		dev->dev_addr[i] = eaddr[i];
+	}
+
+	/*
+	 * Initialize context (get pointers to registers and stuff), then
+	 * allocate the memory for the descriptor tables.
+	 */
+
+	sbmac_initctx(sc);
+
+	/*
+	 * Set up Linux device callins
+	 */
+
+	spin_lock_init(&(sc->sbm_lock));
+
+	dev->netdev_ops = &sbmac_netdev_ops;
+	dev->watchdog_timeo = TX_TIMEOUT;
+
+	netif_napi_add(dev, &sc->napi, sbmac_poll, 16);
+
+	dev->irq		= UNIT_INT(idx);
+
+	/* This is needed for PASS2 for Rx H/W checksum feature */
+	sbmac_set_iphdr_offset(sc);
+
+	sc->mii_bus = mdiobus_alloc();
+	if (sc->mii_bus == NULL) {
+		err = -ENOMEM;
+		goto uninit_ctx;
+	}
+
+	sc->mii_bus->name = sbmac_mdio_string;
+	snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+	sc->mii_bus->priv = sc;
+	sc->mii_bus->read = sbmac_mii_read;
+	sc->mii_bus->write = sbmac_mii_write;
+	sc->mii_bus->irq = sc->phy_irq;
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
+		sc->mii_bus->irq[i] = SBMAC_PHY_INT;
+
+	sc->mii_bus->parent = &pldev->dev;
+	/*
+	 * Probe PHY address
+	 */
+	err = mdiobus_register(sc->mii_bus);
+	if (err) {
+		printk(KERN_ERR "%s: unable to register MDIO bus\n",
+		       dev->name);
+		goto free_mdio;
+	}
+	dev_set_drvdata(&pldev->dev, sc->mii_bus);
+
+	err = register_netdev(dev);
+	if (err) {
+		printk(KERN_ERR "%s.%d: unable to register netdev\n",
+		       sbmac_string, idx);
+		goto unreg_mdio;
+	}
+
+	pr_info("%s.%d: registered as %s\n", sbmac_string, idx, dev->name);
+
+	if (sc->rx_hw_checksum == ENABLE)
+		pr_info("%s: enabling TCP rcv checksum\n", dev->name);
+
+	/*
+	 * Display Ethernet address (this is called during the config
+	 * process so we need to finish off the config message that
+	 * was being displayed)
+	 */
+	pr_info("%s: SiByte Ethernet at 0x%08Lx, address: %pM\n",
+	       dev->name, base, eaddr);
+
+	return 0;
+unreg_mdio:
+	mdiobus_unregister(sc->mii_bus);
+	dev_set_drvdata(&pldev->dev, NULL);
+free_mdio:
+	mdiobus_free(sc->mii_bus);
+uninit_ctx:
+	sbmac_uninitctx(sc);
+	return err;
+}
+
+
+static int sbmac_open(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	int err;
+
+	if (debug > 1)
+		pr_debug("%s: sbmac_open() irq %d.\n", dev->name, dev->irq);
+
+	/*
+	 * map/route interrupt (clear status first, in case something
+	 * weird is pending; we haven't initialized the mac registers
+	 * yet)
+	 */
+
+	__raw_readq(sc->sbm_isr);
+	err = request_irq(dev->irq, sbmac_intr, IRQF_SHARED, dev->name, dev);
+	if (err) {
+		printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name,
+		       dev->irq);
+		goto out_err;
+	}
+
+	sc->sbm_speed = sbmac_speed_none;
+	sc->sbm_duplex = sbmac_duplex_none;
+	sc->sbm_fc = sbmac_fc_none;
+	sc->sbm_pause = -1;
+	sc->sbm_link = 0;
+
+	/*
+	 * Attach to the PHY
+	 */
+	err = sbmac_mii_probe(dev);
+	if (err)
+		goto out_unregister;
+
+	/*
+	 * Turn on the channel
+	 */
+
+	sbmac_set_channel_state(sc,sbmac_state_on);
+
+	netif_start_queue(dev);
+
+	sbmac_set_rx_mode(dev);
+
+	phy_start(sc->phy_dev);
+
+	napi_enable(&sc->napi);
+
+	return 0;
+
+out_unregister:
+	free_irq(dev->irq, dev);
+out_err:
+	return err;
+}
+
+static int sbmac_mii_probe(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	struct phy_device *phy_dev;
+	int i;
+
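+	/* Use the first PHY that responds on this MAC's private MDIO bus. */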
+	for (i = 0; i < PHY_MAX_ADDR; i++) {
+		phy_dev = sc->mii_bus->phy_map[i];
+		if (phy_dev)
+			break;
+	}
+	if (!phy_dev) {
+		printk(KERN_ERR "%s: no PHY found\n", dev->name);
+		return -ENXIO;
+	}
+
+	phy_dev = phy_connect(dev, dev_name(&phy_dev->dev), &sbmac_mii_poll, 0,
+			      PHY_INTERFACE_MODE_GMII);
+	if (IS_ERR(phy_dev)) {
+		printk(KERN_ERR "%s: could not attach to PHY\n", dev->name);
+		return PTR_ERR(phy_dev);
+	}
+
+	/* Remove any features not supported by the controller */
+	phy_dev->supported &= SUPPORTED_10baseT_Half |
+			      SUPPORTED_10baseT_Full |
+			      SUPPORTED_100baseT_Half |
+			      SUPPORTED_100baseT_Full |
+			      SUPPORTED_1000baseT_Half |
+			      SUPPORTED_1000baseT_Full |
+			      SUPPORTED_Autoneg |
+			      SUPPORTED_MII |
+			      SUPPORTED_Pause |
+			      SUPPORTED_Asym_Pause;
+	phy_dev->advertising = phy_dev->supported;
+
+	pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		dev->name, phy_dev->drv->name,
+		dev_name(&phy_dev->dev), phy_dev->irq);
+
+	sc->phy_dev = phy_dev;
+
+	return 0;
+}
+
+
+static void sbmac_mii_poll(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	struct phy_device *phy_dev = sc->phy_dev;
+	unsigned long flags;
+	enum sbmac_fc fc;
+	int link_chg, speed_chg, duplex_chg, pause_chg, fc_chg;
+
+	link_chg = (sc->sbm_link != phy_dev->link);
+	speed_chg = (sc->sbm_speed != phy_dev->speed);
+	duplex_chg = (sc->sbm_duplex != phy_dev->duplex);
+	pause_chg = (sc->sbm_pause != phy_dev->pause);
+
+	if (!link_chg && !speed_chg && !duplex_chg && !pause_chg)
+		return;					/* nothing changed */
+
+	if (!phy_dev->link) {
+		if (link_chg) {
+			sc->sbm_link = phy_dev->link;
+			sc->sbm_speed = sbmac_speed_none;
+			sc->sbm_duplex = sbmac_duplex_none;
+			sc->sbm_fc = sbmac_fc_disabled;
+			sc->sbm_pause = -1;
+			pr_info("%s: link unavailable\n", dev->name);
+		}
+		return;
+	}
+
+	if (phy_dev->duplex == DUPLEX_FULL) {
+		if (phy_dev->pause)
+			fc = sbmac_fc_frame;
+		else
+			fc = sbmac_fc_disabled;
+	} else
+		fc = sbmac_fc_collision;
+	fc_chg = (sc->sbm_fc != fc);
+
+	pr_info("%s: link available: %dbase-%cD\n", dev->name, phy_dev->speed,
+		phy_dev->duplex == DUPLEX_FULL ? 'F' : 'H');
+
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+
+	sc->sbm_speed = phy_dev->speed;
+	sc->sbm_duplex = phy_dev->duplex;
+	sc->sbm_fc = fc;
+	sc->sbm_pause = phy_dev->pause;
+	sc->sbm_link = phy_dev->link;
+
+	if ((speed_chg || duplex_chg || fc_chg) &&
+	    sc->sbm_state != sbmac_state_off) {
+		/*
+		 * something changed, restart the channel
+		 */
+		if (debug > 1)
+			pr_debug("%s: restarting channel "
+				 "because PHY state changed\n", dev->name);
+		sbmac_channel_stop(sc);
+		sbmac_channel_start(sc);
+	}
+
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+}
+
+
+static void sbmac_tx_timeout (struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+
+
+	dev->trans_start = jiffies; /* prevent tx timeout */
+	dev->stats.tx_errors++;
+
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+	printk (KERN_WARNING "%s: Transmit timed out\n",dev->name);
+}
+
+
+
+
+static void sbmac_set_rx_mode(struct net_device *dev)
+{
+	unsigned long flags;
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	spin_lock_irqsave(&sc->sbm_lock, flags);
+	if ((dev->flags ^ sc->sbm_devflags) & IFF_PROMISC) {
+		/*
+		 * Promiscuous changed.
+		 */
+
+		if (dev->flags & IFF_PROMISC) {
+			sbmac_promiscuous_mode(sc,1);
+		}
+		else {
+			sbmac_promiscuous_mode(sc,0);
+		}
+	}
+	spin_unlock_irqrestore(&sc->sbm_lock, flags);
+
+	/*
+	 * Program the multicasts.  Do this every time.
+	 */
+
+	sbmac_setmulti(sc);
+
+}
+
+static int sbmac_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	if (!netif_running(dev) || !sc->phy_dev)
+		return -EINVAL;
+
+	return phy_mii_ioctl(sc->phy_dev, rq, cmd);
+}
+
+static int sbmac_close(struct net_device *dev)
+{
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	napi_disable(&sc->napi);
+
+	phy_stop(sc->phy_dev);
+
+	sbmac_set_channel_state(sc, sbmac_state_off);
+
+	netif_stop_queue(dev);
+
+	if (debug > 1)
+		pr_debug("%s: Shutting down ethercard\n", dev->name);
+
+	phy_disconnect(sc->phy_dev);
+	sc->phy_dev = NULL;
+	free_irq(dev->irq, dev);
+
+	sbdma_emptyring(&(sc->sbm_txdma));
+	sbdma_emptyring(&(sc->sbm_rxdma));
+
+	return 0;
+}
+
+static int sbmac_poll(struct napi_struct *napi, int budget)
+{
+	struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
+	int work_done;
+
+	work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
+	sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
+
+	if (work_done < budget) {
+		napi_complete(napi);
+
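+		/*
+		 * Re-enable the MAC interrupts that sbmac_intr() masked
+		 * when it scheduled this poll.
+		 */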
+#ifdef CONFIG_SBMAC_COALESCE
+		__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
+			     ((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_RX_CH0),
+			     sc->sbm_imr);
+#else
+		__raw_writeq((M_MAC_INT_CHANNEL << S_MAC_TX_CH0) |
+			     (M_MAC_INT_CHANNEL << S_MAC_RX_CH0), sc->sbm_imr);
+#endif
+	}
+
+	return work_done;
+}
+
+
+static int __devinit sbmac_probe(struct platform_device *pldev)
+{
+	struct net_device *dev;
+	struct sbmac_softc *sc;
+	void __iomem *sbm_base;
+	struct resource *res;
+	u64 sbmac_orig_hwaddr;
+	int err;
+
+	res = platform_get_resource(pldev, IORESOURCE_MEM, 0);
+	BUG_ON(!res);
+	sbm_base = ioremap_nocache(res->start, resource_size(res));
+	if (!sbm_base) {
+		printk(KERN_ERR "%s: unable to map device registers\n",
+		       dev_name(&pldev->dev));
+		err = -ENOMEM;
+		goto out_out;
+	}
+
+	/*
+	 * The R_MAC_ETHERNET_ADDR register will be set to some nonzero
+	 * value for us by the firmware if we're going to use this MAC.
+	 * If we find a zero, skip this MAC.
+	 */
+	sbmac_orig_hwaddr = __raw_readq(sbm_base + R_MAC_ETHERNET_ADDR);
+	pr_debug("%s: %sconfiguring MAC at 0x%08Lx\n", dev_name(&pldev->dev),
+		 sbmac_orig_hwaddr ? "" : "not ", (long long)res->start);
+	if (sbmac_orig_hwaddr == 0) {
+		err = 0;
+		goto out_unmap;
+	}
+
+	/*
+	 * Okay, cool.  Initialize this MAC.
+	 */
+	dev = alloc_etherdev(sizeof(struct sbmac_softc));
+	if (!dev) {
+		printk(KERN_ERR "%s: unable to allocate etherdev\n",
+		       dev_name(&pldev->dev));
+		err = -ENOMEM;
+		goto out_unmap;
+	}
+
+	dev_set_drvdata(&pldev->dev, dev);
+	SET_NETDEV_DEV(dev, &pldev->dev);
+
+	sc = netdev_priv(dev);
+	sc->sbm_base = sbm_base;
+
+	err = sbmac_init(pldev, res->start);
+	if (err)
+		goto out_kfree;
+
+	return 0;
+
+out_kfree:
+	free_netdev(dev);
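+	/*
+	 * Restore the firmware-programmed address so this MAC is still
+	 * recognized as usable if the driver is probed again.
+	 */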
+	__raw_writeq(sbmac_orig_hwaddr, sbm_base + R_MAC_ETHERNET_ADDR);
+
+out_unmap:
+	iounmap(sbm_base);
+
+out_out:
+	return err;
+}
+
+static int __exit sbmac_remove(struct platform_device *pldev)
+{
+	struct net_device *dev = dev_get_drvdata(&pldev->dev);
+	struct sbmac_softc *sc = netdev_priv(dev);
+
+	unregister_netdev(dev);
+	sbmac_uninitctx(sc);
+	mdiobus_unregister(sc->mii_bus);
+	mdiobus_free(sc->mii_bus);
+	iounmap(sc->sbm_base);
+	free_netdev(dev);
+
+	return 0;
+}
+
+static struct platform_driver sbmac_driver = {
+	.probe = sbmac_probe,
+	.remove = __exit_p(sbmac_remove),
+	.driver = {
+		.name = sbmac_string,
+		.owner  = THIS_MODULE,
+	},
+};
+
+static int __init sbmac_init_module(void)
+{
+	return platform_driver_register(&sbmac_driver);
+}
+
+static void __exit sbmac_cleanup_module(void)
+{
+	platform_driver_unregister(&sbmac_driver);
+}
+
+module_init(sbmac_init_module);
+module_exit(sbmac_cleanup_module);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
new file mode 100644
index 0000000..dc3fbf6
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -0,0 +1,15862 @@
+/*
+ * tg3.c: Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2005-2011 Broadcom Corporation.
+ *
+ * Firmware is:
+ *	Derived from proprietary unpublished source code,
+ *	Copyright (C) 2000-2003 Broadcom Corporation.
+ *
+ *	Permission is hereby granted for the distribution of this firmware
+ *	data in hexadecimal or equivalent format, provided this copyright
+ *	notice is accompanying it.
+ */
+
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/stringify.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/in.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/brcmphy.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/workqueue.h>
+#include <linux/prefetch.h>
+#include <linux/dma-mapping.h>
+#include <linux/firmware.h>
+
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <asm/system.h>
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#include <asm/prom.h>
+#endif
+
+#define BAR_0	0
+#define BAR_2	2
+
+#include "tg3.h"
+
+/* Functions & macros to verify TG3_FLAGS types */
+
+static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	return test_bit(flag, bits);
+}
+
+static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	set_bit(flag, bits);
+}
+
+static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
+{
+	clear_bit(flag, bits);
+}
+
+#define tg3_flag(tp, flag)				\
+	_tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_set(tp, flag)				\
+	_tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
+#define tg3_flag_clear(tp, flag)			\
+	_tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
+
+#define DRV_MODULE_NAME		"tg3"
+#define TG3_MAJ_NUM			3
+#define TG3_MIN_NUM			119
+#define DRV_MODULE_VERSION	\
+	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
+#define DRV_MODULE_RELDATE	"May 18, 2011"
+
+#define TG3_DEF_MAC_MODE	0
+#define TG3_DEF_RX_MODE		0
+#define TG3_DEF_TX_MODE		0
+#define TG3_DEF_MSG_ENABLE	  \
+	(NETIF_MSG_DRV		| \
+	 NETIF_MSG_PROBE	| \
+	 NETIF_MSG_LINK		| \
+	 NETIF_MSG_TIMER	| \
+	 NETIF_MSG_IFDOWN	| \
+	 NETIF_MSG_IFUP		| \
+	 NETIF_MSG_RX_ERR	| \
+	 NETIF_MSG_TX_ERR)
+
+#define TG3_GRC_LCLCTL_PWRSW_DELAY	100
+
+/* length of time before we decide the hardware is borked,
+ * and dev->tx_timeout() should be called to fix the problem
+ */
+
+#define TG3_TX_TIMEOUT			(5 * HZ)
+
+/* hardware minimum and maximum for a single frame's data payload */
+#define TG3_MIN_MTU			60
+#define TG3_MAX_MTU(tp)	\
+	(tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
+
+/* These numbers seem to be hard coded in the NIC firmware somehow.
+ * You can't change the ring sizes, but you can change where you place
+ * them in the NIC onboard memory.
+ */
+#define TG3_RX_STD_RING_SIZE(tp) \
+	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+	 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
+#define TG3_DEF_RX_RING_PENDING		200
+#define TG3_RX_JMB_RING_SIZE(tp) \
+	(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
+	 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
+#define TG3_DEF_RX_JUMBO_RING_PENDING	100
+#define TG3_RSS_INDIR_TBL_SIZE		128
+
+/* Do not place this n-ring entries value into the tp struct itself,
+ * we really want to expose these constants to GCC so that modulo et
+ * al.  operations are done with shifts and masks instead of with
+ * hw multiply/modulo instructions.  Another solution would be to
+ * replace things like '% foo' with '& (foo - 1)'.
+ */
+
+#define TG3_TX_RING_SIZE		512
+#define TG3_DEF_TX_RING_PENDING		(TG3_TX_RING_SIZE - 1)
+
+#define TG3_RX_STD_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
+#define TG3_RX_JMB_RING_BYTES(tp) \
+	(sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
+#define TG3_RX_RCB_RING_BYTES(tp) \
+	(sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
+#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
+				 TG3_TX_RING_SIZE)
+#define NEXT_TX(N)		(((N) + 1) & (TG3_TX_RING_SIZE - 1))
+
+#define TG3_DMA_BYTE_ENAB		64
+
+#define TG3_RX_STD_DMA_SZ		1536
+#define TG3_RX_JMB_DMA_SZ		9046
+
+#define TG3_RX_DMA_TO_MAP_SZ(x)		((x) + TG3_DMA_BYTE_ENAB)
+
+#define TG3_RX_STD_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
+#define TG3_RX_JMB_MAP_SZ		TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
+
+#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
+
+#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
+	(sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode.  The driver
+ * works around this bug by double copying the packet.  This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient.  For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path.  Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD		256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
+#else
+	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
+#endif
+
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TG3_TX_WAKEUP_THRESH(tnapi)		((tnapi)->tx_pending / 4)
+#define TG3_TX_BD_DMA_MAX		4096
+
+#define TG3_RAW_IP_ALIGN 2
+
+#define TG3_FW_UPDATE_TIMEOUT_SEC	5
+
+#define FIRMWARE_TG3		"tigon/tg3.bin"
+#define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
+#define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
+
+static char version[] __devinitdata =
+	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
+
+MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
+MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_FIRMWARE(FIRMWARE_TG3);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO);
+MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
+
+static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
+module_param(tg3_debug, int, 0);
+MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
+	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
+	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
+	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
+	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[] = {
+	{ "rx_octets" },
+	{ "rx_fragments" },
+	{ "rx_ucast_packets" },
+	{ "rx_mcast_packets" },
+	{ "rx_bcast_packets" },
+	{ "rx_fcs_errors" },
+	{ "rx_align_errors" },
+	{ "rx_xon_pause_rcvd" },
+	{ "rx_xoff_pause_rcvd" },
+	{ "rx_mac_ctrl_rcvd" },
+	{ "rx_xoff_entered" },
+	{ "rx_frame_too_long_errors" },
+	{ "rx_jabbers" },
+	{ "rx_undersize_packets" },
+	{ "rx_in_length_errors" },
+	{ "rx_out_length_errors" },
+	{ "rx_64_or_less_octet_packets" },
+	{ "rx_65_to_127_octet_packets" },
+	{ "rx_128_to_255_octet_packets" },
+	{ "rx_256_to_511_octet_packets" },
+	{ "rx_512_to_1023_octet_packets" },
+	{ "rx_1024_to_1522_octet_packets" },
+	{ "rx_1523_to_2047_octet_packets" },
+	{ "rx_2048_to_4095_octet_packets" },
+	{ "rx_4096_to_8191_octet_packets" },
+	{ "rx_8192_to_9022_octet_packets" },
+
+	{ "tx_octets" },
+	{ "tx_collisions" },
+
+	{ "tx_xon_sent" },
+	{ "tx_xoff_sent" },
+	{ "tx_flow_control" },
+	{ "tx_mac_errors" },
+	{ "tx_single_collisions" },
+	{ "tx_mult_collisions" },
+	{ "tx_deferred" },
+	{ "tx_excessive_collisions" },
+	{ "tx_late_collisions" },
+	{ "tx_collide_2times" },
+	{ "tx_collide_3times" },
+	{ "tx_collide_4times" },
+	{ "tx_collide_5times" },
+	{ "tx_collide_6times" },
+	{ "tx_collide_7times" },
+	{ "tx_collide_8times" },
+	{ "tx_collide_9times" },
+	{ "tx_collide_10times" },
+	{ "tx_collide_11times" },
+	{ "tx_collide_12times" },
+	{ "tx_collide_13times" },
+	{ "tx_collide_14times" },
+	{ "tx_collide_15times" },
+	{ "tx_ucast_packets" },
+	{ "tx_mcast_packets" },
+	{ "tx_bcast_packets" },
+	{ "tx_carrier_sense_errors" },
+	{ "tx_discards" },
+	{ "tx_errors" },
+
+	{ "dma_writeq_full" },
+	{ "dma_write_prioq_full" },
+	{ "rxbds_empty" },
+	{ "rx_discards" },
+	{ "rx_errors" },
+	{ "rx_threshold_hit" },
+
+	{ "dma_readq_full" },
+	{ "dma_read_prioq_full" },
+	{ "tx_comp_queue_full" },
+
+	{ "ring_set_send_prod_index" },
+	{ "ring_status_update" },
+	{ "nic_irqs" },
+	{ "nic_avoided_irqs" },
+	{ "nic_tx_threshold_hit" },
+
+	{ "mbuf_lwm_thresh_hit" },
+};
+
+#define TG3_NUM_STATS	ARRAY_SIZE(ethtool_stats_keys)
+
+
+static const struct {
+	const char string[ETH_GSTRING_LEN];
+} ethtool_test_keys[] = {
+	{ "nvram test     (online) " },
+	{ "link test      (online) " },
+	{ "register test  (offline)" },
+	{ "memory test    (offline)" },
+	{ "loopback test  (offline)" },
+	{ "interrupt test (offline)" },
+};
+
+#define TG3_NUM_TEST	ARRAY_SIZE(ethtool_test_keys)
+
+
+static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->regs + off);
+}
+
+static u32 tg3_read32(struct tg3 *tp, u32 off)
+{
+	return readl(tp->regs + off);
+}
+
+static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->aperegs + off);
+}
+
+static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
+{
+	return readl(tp->aperegs + off);
+}
+
+static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->regs + off);
+	readl(tp->regs + off);
+}
+
+static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
+	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	return val;
+}
+
+static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+	unsigned long flags;
+
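+	/*
+	 * These two producer-index mailboxes can be written through their
+	 * PCI config space aliases without taking indirect_lock.
+	 */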
+	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
+				       TG3_64BIT_REG_LOW, val);
+		return;
+	}
+	if (off == TG3_RX_STD_PROD_IDX_REG) {
+		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
+				       TG3_64BIT_REG_LOW, val);
+		return;
+	}
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+
+	/* In indirect mode when disabling interrupts, we also need
+	 * to clear the interrupt bit in the GRC local ctrl register.
+	 */
+	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
+	    (val == 0x1)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
+				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
+	}
+}
+
+static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
+	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+	return val;
+}
+
+/* usec_wait specifies the wait time in usec when writing to certain registers
+ * where it is unsafe to read back the register without some delay.
+ * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
+ * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
+ */
+static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
+{
+	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
+		/* Non-posted methods */
+		tp->write32(tp, off, val);
+	else {
+		/* Posted method */
+		tg3_write32(tp, off, val);
+		if (usec_wait)
+			udelay(usec_wait);
+		tp->read32(tp, off);
+	}
+	/* Wait again after the read for the posted method to guarantee that
+	 * the wait time is met.
+	 */
+	if (usec_wait)
+		udelay(usec_wait);
+}
+
+static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
+{
+	tp->write32_mbox(tp, off, val);
+	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
+		tp->read32_mbox(tp, off);
+}
+
+static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
+{
+	void __iomem *mbox = tp->regs + off;
+	writel(val, mbox);
+	if (tg3_flag(tp, TXD_MBOX_HWBUG))
+		writel(val, mbox);
+	if (tg3_flag(tp, MBOX_WRITE_REORDER))
+		readl(mbox);
+}
+
+static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
+{
+	return readl(tp->regs + off + GRCMBOX_BASE);
+}
+
+static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
+{
+	writel(val, tp->regs + off + GRCMBOX_BASE);
+}
+
+#define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
+#define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
+#define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
+#define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
+#define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)
+
+#define tw32(reg, val)			tp->write32(tp, reg, val)
+#define tw32_f(reg, val)		_tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us)	_tw32_flush(tp, (reg), (val), (us))
+#define tr32(reg)			tp->read32(tp, reg)
+
+static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
+{
+	unsigned long flags;
+
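+	/*
+	 * The 5906 does not implement the NIC SRAM statistics block
+	 * range; quietly discard writes aimed there.
+	 */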
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
+		return;
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+		/* Always leave this as zero. */
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	} else {
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+		tw32_f(TG3PCI_MEM_WIN_DATA, val);
+
+		/* Always leave this as zero. */
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	}
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
+{
+	unsigned long flags;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
+	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
+		*val = 0;
+		return;
+	}
+
+	spin_lock_irqsave(&tp->indirect_lock, flags);
+	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
+		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+
+		/* Always leave this as zero. */
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	} else {
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
+		*val = tr32(TG3PCI_MEM_WIN_DATA);
+
+		/* Always leave this as zero. */
+		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+	}
+	spin_unlock_irqrestore(&tp->indirect_lock, flags);
+}
+
+static void tg3_ape_lock_init(struct tg3 *tp)
+{
+	int i;
+	u32 regbase, bit;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+		regbase = TG3_APE_LOCK_GRANT;
+	else
+		regbase = TG3_APE_PER_LOCK_GRANT;
+
+	/* Make sure the driver doesn't hold any stale locks. */
+	for (i = 0; i < 8; i++) {
+		if (i == TG3_APE_LOCK_GPIO)
+			continue;
+		tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
+	}
+
+	/* Clear the correct bit of the GPIO lock too. */
+	if (!tp->pci_fn)
+		bit = APE_LOCK_GRANT_DRIVER;
+	else
+		bit = 1 << tp->pci_fn;
+
+	tg3_ape_write32(tp, regbase + 4 * TG3_APE_LOCK_GPIO, bit);
+}
+
+static int tg3_ape_lock(struct tg3 *tp, int locknum)
+{
+	int i, off;
+	int ret = 0;
+	u32 status, req, gnt, bit;
+
+	if (!tg3_flag(tp, ENABLE_APE))
+		return 0;
+
+	switch (locknum) {
+	case TG3_APE_LOCK_GPIO:
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+			return 0;
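+		/* fall through */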
+	case TG3_APE_LOCK_GRC:
+	case TG3_APE_LOCK_MEM:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+		req = TG3_APE_LOCK_REQ;
+		gnt = TG3_APE_LOCK_GRANT;
+	} else {
+		req = TG3_APE_PER_LOCK_REQ;
+		gnt = TG3_APE_PER_LOCK_GRANT;
+	}
+
+	off = 4 * locknum;
+
+	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+		bit = APE_LOCK_REQ_DRIVER;
+	else
+		bit = 1 << tp->pci_fn;
+
+	tg3_ape_write32(tp, req + off, bit);
+
+	/* Wait for up to 1 millisecond to acquire lock. */
+	for (i = 0; i < 100; i++) {
+		status = tg3_ape_read32(tp, gnt + off);
+		if (status == bit)
+			break;
+		udelay(10);
+	}
+
+	if (status != bit) {
+		/* Revoke the lock request. */
+		tg3_ape_write32(tp, gnt + off, bit);
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+static void tg3_ape_unlock(struct tg3 *tp, int locknum)
+{
+	u32 gnt, bit;
+
+	if (!tg3_flag(tp, ENABLE_APE))
+		return;
+
+	switch (locknum) {
+	case TG3_APE_LOCK_GPIO:
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+			return;
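+		/* fall through */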
+	case TG3_APE_LOCK_GRC:
+	case TG3_APE_LOCK_MEM:
+		break;
+	default:
+		return;
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+		gnt = TG3_APE_LOCK_GRANT;
+	else
+		gnt = TG3_APE_PER_LOCK_GRANT;
+
+	if (locknum != TG3_APE_LOCK_GPIO || !tp->pci_fn)
+		bit = APE_LOCK_GRANT_DRIVER;
+	else
+		bit = 1 << tp->pci_fn;
+
+	tg3_ape_write32(tp, gnt + 4 * locknum, bit);
+}
+
+static void tg3_disable_ints(struct tg3 *tp)
+{
+	int i;
+
+	tw32(TG3PCI_MISC_HOST_CTRL,
+	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
+	for (i = 0; i < tp->irq_max; i++)
+		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
+}
+
+static void tg3_enable_ints(struct tg3 *tp)
+{
+	int i;
+
+	tp->irq_sync = 0;
+	wmb();
+
+	tw32(TG3PCI_MISC_HOST_CTRL,
+	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+
+	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+		if (tg3_flag(tp, 1SHOT_MSI))
+			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+		tp->coal_now |= tnapi->coal_now;
+	}
+
+	/* Force an initial interrupt */
+	if (!tg3_flag(tp, TAGGED_STATUS) &&
+	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
+		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+	else
+		tw32(HOSTCC_MODE, tp->coal_now);
+
+	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
+}
+
+static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
+{
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+	unsigned int work_exists = 0;
+
+	/* check for phy events */
+	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+		if (sblk->status & SD_STATUS_LINK_CHG)
+			work_exists = 1;
+	}
+	/* check for RX/TX work to do */
+	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+		work_exists = 1;
+
+	return work_exists;
+}
+
+/* tg3_int_reenable
+ *  similar to tg3_enable_ints, but it accurately determines whether there
+ *  is new work pending and can return without flushing the PIO write
+ *  which reenables interrupts
+ */
+static void tg3_int_reenable(struct tg3_napi *tnapi)
+{
+	struct tg3 *tp = tnapi->tp;
+
+	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+	mmiowb();
+
+	/* When doing tagged status, this work check is unnecessary.
+	 * The last_tag we write above tells the chip which piece of
+	 * work we've completed.
+	 */
+	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
+		tw32(HOSTCC_MODE, tp->coalesce_mode |
+		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
+}
+
+static void tg3_switch_clocks(struct tg3 *tp)
+{
+	u32 clock_ctrl;
+	u32 orig_clock_ctrl;
+
+	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
+		return;
+
+	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
+
+	orig_clock_ctrl = clock_ctrl;
+	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
+		       CLOCK_CTRL_CLKRUN_OENABLE |
+		       0x1f);
+	tp->pci_clock_ctrl = clock_ctrl;
+
+	if (tg3_flag(tp, 5705_PLUS)) {
+		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
+			tw32_wait_f(TG3PCI_CLOCK_CTRL,
+				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
+		}
+	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
+		tw32_wait_f(TG3PCI_CLOCK_CTRL,
+			    clock_ctrl |
+			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
+			    40);
+		tw32_wait_f(TG3PCI_CLOCK_CTRL,
+			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
+			    40);
+	}
+	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
+}
+
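+/* Poll the MI interface for up to 5000 x 10 usec, roughly 50 ms. */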
+#define PHY_BUSY_LOOPS	5000
+
+static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
+{
+	u32 frame_val;
+	unsigned int loops;
+	int ret;
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE,
+		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+		udelay(80);
+	}
+
+	*val = 0x0;
+
+	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+		      MI_COM_PHY_ADDR_MASK);
+	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+		      MI_COM_REG_ADDR_MASK);
+	frame_val |= (MI_COM_CMD_READ | MI_COM_START);
+
+	tw32_f(MAC_MI_COM, frame_val);
+
+	loops = PHY_BUSY_LOOPS;
+	while (loops != 0) {
+		udelay(10);
+		frame_val = tr32(MAC_MI_COM);
+
+		if ((frame_val & MI_COM_BUSY) == 0) {
+			udelay(5);
+			frame_val = tr32(MAC_MI_COM);
+			break;
+		}
+		loops -= 1;
+	}
+
+	ret = -EBUSY;
+	if (loops != 0) {
+		*val = frame_val & MI_COM_DATA_MASK;
+		ret = 0;
+	}
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE, tp->mi_mode);
+		udelay(80);
+	}
+
+	return ret;
+}
+
+static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
+{
+	u32 frame_val;
+	unsigned int loops;
+	int ret;
+
+	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+	    (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
+		return 0;
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE,
+		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+		udelay(80);
+	}
+
+	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
+		      MI_COM_PHY_ADDR_MASK);
+	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
+		      MI_COM_REG_ADDR_MASK);
+	frame_val |= (val & MI_COM_DATA_MASK);
+	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
+
+	tw32_f(MAC_MI_COM, frame_val);
+
+	loops = PHY_BUSY_LOOPS;
+	while (loops != 0) {
+		udelay(10);
+		frame_val = tr32(MAC_MI_COM);
+		if ((frame_val & MI_COM_BUSY) == 0) {
+			udelay(5);
+			frame_val = tr32(MAC_MI_COM);
+			break;
+		}
+		loops -= 1;
+	}
+
+	ret = -EBUSY;
+	if (loops != 0)
+		ret = 0;
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE, tp->mi_mode);
+		udelay(80);
+	}
+
+	return ret;
+}
+
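+/* Clause 45 MMD registers are reached indirectly through the Clause 22
+ * MMD access control/address registers (IEEE 802.3 Annex 22D).
+ */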
+static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+	return err;
+}
+
+static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
+	if (err)
+		goto done;
+
+	err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
+			   MII_TG3_MMD_CTRL_DATA_NOINC | devad);
+	if (err)
+		goto done;
+
+	err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
+
+done:
+	return err;
+}
+
+static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+	if (!err)
+		err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
+
+	return err;
+}
+
+static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
+	if (!err)
+		err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
+
+	return err;
+}
+
+static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
+{
+	int err;
+
+	err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
+			   (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
+			   MII_TG3_AUXCTL_SHDWSEL_MISC);
+	if (!err)
+		err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
+
+	return err;
+}
+
+static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
+{
+	if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
+		set |= MII_TG3_AUXCTL_MISC_WREN;
+
+	return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
+}
+
+#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
+	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
+			     MII_TG3_AUXCTL_ACTL_TX_6DB)
+
+#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
+	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
+			     MII_TG3_AUXCTL_ACTL_TX_6DB);
+
+static int tg3_bmcr_reset(struct tg3 *tp)
+{
+	u32 phy_control;
+	int limit, err;
+
+	/* OK, reset it, and poll the BMCR_RESET bit until it
+	 * clears or we time out.
+	 */
+	phy_control = BMCR_RESET;
+	err = tg3_writephy(tp, MII_BMCR, phy_control);
+	if (err != 0)
+		return -EBUSY;
+
+	limit = 5000;
+	while (limit--) {
+		err = tg3_readphy(tp, MII_BMCR, &phy_control);
+		if (err != 0)
+			return -EBUSY;
+
+		if ((phy_control & BMCR_RESET) == 0) {
+			udelay(40);
+			break;
+		}
+		udelay(10);
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
+{
+	struct tg3 *tp = bp->priv;
+	u32 val;
+
+	spin_lock_bh(&tp->lock);
+
+	if (tg3_readphy(tp, reg, &val))
+		val = -EIO;
+
+	spin_unlock_bh(&tp->lock);
+
+	return val;
+}
+
+static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
+{
+	struct tg3 *tp = bp->priv;
+	u32 ret = 0;
+
+	spin_lock_bh(&tp->lock);
+
+	if (tg3_writephy(tp, reg, val))
+		ret = -EIO;
+
+	spin_unlock_bh(&tp->lock);
+
+	return ret;
+}
+
+static int tg3_mdio_reset(struct mii_bus *bp)
+{
+	return 0;
+}
+
+static void tg3_mdio_config_5785(struct tg3 *tp)
+{
+	u32 val;
+	struct phy_device *phydev;
+
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+	case PHY_ID_BCM50610:
+	case PHY_ID_BCM50610M:
+		val = MAC_PHYCFG2_50610_LED_MODES;
+		break;
+	case PHY_ID_BCMAC131:
+		val = MAC_PHYCFG2_AC131_LED_MODES;
+		break;
+	case PHY_ID_RTL8211C:
+		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
+		break;
+	case PHY_ID_RTL8201E:
+		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
+		break;
+	default:
+		return;
+	}
+
+	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
+		tw32(MAC_PHYCFG2, val);
+
+		val = tr32(MAC_PHYCFG1);
+		val &= ~(MAC_PHYCFG1_RGMII_INT |
+			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
+		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
+		tw32(MAC_PHYCFG1, val);
+
+		return;
+	}
+
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
+		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
+		       MAC_PHYCFG2_FMODE_MASK_MASK |
+		       MAC_PHYCFG2_GMODE_MASK_MASK |
+		       MAC_PHYCFG2_ACT_MASK_MASK   |
+		       MAC_PHYCFG2_QUAL_MASK_MASK |
+		       MAC_PHYCFG2_INBAND_ENABLE;
+
+	tw32(MAC_PHYCFG2, val);
+
+	val = tr32(MAC_PHYCFG1);
+	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
+		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
+	}
+	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
+	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
+	tw32(MAC_PHYCFG1, val);
+
+	val = tr32(MAC_EXT_RGMII_MODE);
+	val &= ~(MAC_RGMII_MODE_RX_INT_B |
+		 MAC_RGMII_MODE_RX_QUALITY |
+		 MAC_RGMII_MODE_RX_ACTIVITY |
+		 MAC_RGMII_MODE_RX_ENG_DET |
+		 MAC_RGMII_MODE_TX_ENABLE |
+		 MAC_RGMII_MODE_TX_LOWPWR |
+		 MAC_RGMII_MODE_TX_RESET);
+	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+			val |= MAC_RGMII_MODE_RX_INT_B |
+			       MAC_RGMII_MODE_RX_QUALITY |
+			       MAC_RGMII_MODE_RX_ACTIVITY |
+			       MAC_RGMII_MODE_RX_ENG_DET;
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+			val |= MAC_RGMII_MODE_TX_ENABLE |
+			       MAC_RGMII_MODE_TX_LOWPWR |
+			       MAC_RGMII_MODE_TX_RESET;
+	}
+	tw32(MAC_EXT_RGMII_MODE, val);
+}
+
+static void tg3_mdio_start(struct tg3 *tp)
+{
+	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
+	tw32_f(MAC_MI_MODE, tp->mi_mode);
+	udelay(80);
+
+	if (tg3_flag(tp, MDIOBUS_INITED) &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+		tg3_mdio_config_5785(tp);
+}
+
+static int tg3_mdio_init(struct tg3 *tp)
+{
+	int i;
+	u32 reg;
+	struct phy_device *phydev;
+
+	if (tg3_flag(tp, 5717_PLUS)) {
+		u32 is_serdes;
+
+		tp->phy_addr = tp->pci_fn + 1;
+
+		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
+			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
+		else
+			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
+				    TG3_CPMU_PHY_STRAP_IS_SERDES;
+		if (is_serdes)
+			tp->phy_addr += 7;
+	} else
+		tp->phy_addr = TG3_PHY_MII_ADDR;
+
+	tg3_mdio_start(tp);
+
+	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
+		return 0;
+
+	tp->mdio_bus = mdiobus_alloc();
+	if (tp->mdio_bus == NULL)
+		return -ENOMEM;
+
+	tp->mdio_bus->name     = "tg3 mdio bus";
+	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
+		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
+	tp->mdio_bus->priv     = tp;
+	tp->mdio_bus->parent   = &tp->pdev->dev;
+	tp->mdio_bus->read     = &tg3_mdio_read;
+	tp->mdio_bus->write    = &tg3_mdio_write;
+	tp->mdio_bus->reset    = &tg3_mdio_reset;
+	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
+	tp->mdio_bus->irq      = &tp->mdio_irq[0];
+
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		tp->mdio_bus->irq[i] = PHY_POLL;
+
+	/* The bus registration will look for all the PHYs on the mdio bus.
+	 * Unfortunately, it does not ensure the PHY is powered up before
+	 * accessing the PHY ID registers.  A chip reset is the
+	 * quickest way to bring the device back to an operational state.
+	 */
+	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
+		tg3_bmcr_reset(tp);
+
+	i = mdiobus_register(tp->mdio_bus);
+	if (i) {
+		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
+		mdiobus_free(tp->mdio_bus);
+		return i;
+	}
+
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+	if (!phydev || !phydev->drv) {
+		dev_warn(&tp->pdev->dev, "No PHY devices\n");
+		mdiobus_unregister(tp->mdio_bus);
+		mdiobus_free(tp->mdio_bus);
+		return -ENODEV;
+	}
+
+	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
+	case PHY_ID_BCM57780:
+		phydev->interface = PHY_INTERFACE_MODE_GMII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		break;
+	case PHY_ID_BCM50610:
+	case PHY_ID_BCM50610M:
+		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
+				     PHY_BRCM_RX_REFCLK_UNUSED |
+				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
+				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
+			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
+		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
+			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
+		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
+			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
+		/* fallthru */
+	case PHY_ID_RTL8211C:
+		phydev->interface = PHY_INTERFACE_MODE_RGMII;
+		break;
+	case PHY_ID_RTL8201E:
+	case PHY_ID_BCMAC131:
+		phydev->interface = PHY_INTERFACE_MODE_MII;
+		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
+		tp->phy_flags |= TG3_PHYFLG_IS_FET;
+		break;
+	}
+
+	tg3_flag_set(tp, MDIOBUS_INITED);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+		tg3_mdio_config_5785(tp);
+
+	return 0;
+}
+
+static void tg3_mdio_fini(struct tg3 *tp)
+{
+	if (tg3_flag(tp, MDIOBUS_INITED)) {
+		tg3_flag_clear(tp, MDIOBUS_INITED);
+		mdiobus_unregister(tp->mdio_bus);
+		mdiobus_free(tp->mdio_bus);
+	}
+}
+
+/* tp->lock is held. */
+static inline void tg3_generate_fw_event(struct tg3 *tp)
+{
+	u32 val;
+
+	val = tr32(GRC_RX_CPU_EVENT);
+	val |= GRC_RX_CPU_DRIVER_EVENT;
+	tw32_f(GRC_RX_CPU_EVENT, val);
+
+	tp->last_event_jiffies = jiffies;
+}
+
+#define TG3_FW_EVENT_TIMEOUT_USEC 2500
+
+/* tp->lock is held. */
+static void tg3_wait_for_event_ack(struct tg3 *tp)
+{
+	int i;
+	unsigned int delay_cnt;
+	long time_remain;
+
+	/* If enough time has passed, no wait is necessary. */
+	time_remain = (long)(tp->last_event_jiffies + 1 +
+		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
+		      (long)jiffies;
+	if (time_remain < 0)
+		return;
+
+	/* Check if we can shorten the wait time. */
+	delay_cnt = jiffies_to_usecs(time_remain);
+	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
+		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
+	delay_cnt = (delay_cnt >> 3) + 1;
+
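+	/* The loop below polls in 8 usec steps, hence the divide by 8. */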
+	for (i = 0; i < delay_cnt; i++) {
+		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
+			break;
+		udelay(8);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_ump_link_report(struct tg3 *tp)
+{
+	u32 reg;
+	u32 val;
+
+	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
+		return;
+
+	tg3_wait_for_event_ack(tp);
+
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
+
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
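+	/* 14 bytes of link data follow: three pairs of 16-bit MII
+	 * registers plus one more 16-bit register.
+	 */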
+
+	val = 0;
+	if (!tg3_readphy(tp, MII_BMCR, &reg))
+		val = reg << 16;
+	if (!tg3_readphy(tp, MII_BMSR, &reg))
+		val |= (reg & 0xffff);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
+
+	val = 0;
+	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
+		val = reg << 16;
+	if (!tg3_readphy(tp, MII_LPA, &reg))
+		val |= (reg & 0xffff);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
+
+	val = 0;
+	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
+		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
+			val = reg << 16;
+		if (!tg3_readphy(tp, MII_STAT1000, &reg))
+			val |= (reg & 0xffff);
+	}
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
+
+	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
+		val = reg << 16;
+	else
+		val = 0;
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
+
+	tg3_generate_fw_event(tp);
+}
+
+static void tg3_link_report(struct tg3 *tp)
+{
+	if (!netif_carrier_ok(tp->dev)) {
+		netif_info(tp, link, tp->dev, "Link is down\n");
+		tg3_ump_link_report(tp);
+	} else if (netif_msg_link(tp)) {
+		netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
+			    (tp->link_config.active_speed == SPEED_1000 ?
+			     1000 :
+			     (tp->link_config.active_speed == SPEED_100 ?
+			      100 : 10)),
+			    (tp->link_config.active_duplex == DUPLEX_FULL ?
+			     "full" : "half"));
+
+		netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
+			    (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
+			    "on" : "off",
+			    (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
+			    "on" : "off");
+
+		if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
+			netdev_info(tp->dev, "EEE is %s\n",
+				    tp->setlpicnt ? "enabled" : "disabled");
+
+		tg3_ump_link_report(tp);
+	}
+}
+
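+/* Translate the requested TX/RX flow control into the standard MII
+ * pause advertisement bits: symmetric pause, asymmetric pause, or both.
+ */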
+static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
+{
+	u16 miireg;
+
+	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
+		miireg = ADVERTISE_PAUSE_CAP;
+	else if (flow_ctrl & FLOW_CTRL_TX)
+		miireg = ADVERTISE_PAUSE_ASYM;
+	else if (flow_ctrl & FLOW_CTRL_RX)
+		miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+	else
+		miireg = 0;
+
+	return miireg;
+}
+
+static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
+{
+	u16 miireg;
+
+	if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
+		miireg = ADVERTISE_1000XPAUSE;
+	else if (flow_ctrl & FLOW_CTRL_TX)
+		miireg = ADVERTISE_1000XPSE_ASYM;
+	else if (flow_ctrl & FLOW_CTRL_RX)
+		miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+	else
+		miireg = 0;
+
+	return miireg;
+}
+
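+/* Resolve the negotiated pause configuration from the local and remote
+ * 1000BASE-X pause advertisements (IEEE 802.3 Annex 28B): symmetric
+ * pause on both sides enables TX and RX; asymmetric combinations
+ * enable one direction only.
+ */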
+static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
+{
+	u8 cap = 0;
+
+	if (lcladv & ADVERTISE_1000XPAUSE) {
+		if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+			if (rmtadv & LPA_1000XPAUSE)
+				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+			else if (rmtadv & LPA_1000XPAUSE_ASYM)
+				cap = FLOW_CTRL_RX;
+		} else {
+			if (rmtadv & LPA_1000XPAUSE)
+				cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+		}
+	} else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
+		if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+			cap = FLOW_CTRL_TX;
+	}
+
+	return cap;
+}
+
+static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
+{
+	u8 autoneg;
+	u8 flowctrl = 0;
+	u32 old_rx_mode = tp->rx_mode;
+	u32 old_tx_mode = tp->tx_mode;
+
+	if (tg3_flag(tp, USE_PHYLIB))
+		autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
+	else
+		autoneg = tp->link_config.autoneg;
+
+	if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+			flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
+		else
+			flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
+	} else
+		flowctrl = tp->link_config.flowctrl;
+
+	tp->link_config.active_flowctrl = flowctrl;
+
+	if (flowctrl & FLOW_CTRL_RX)
+		tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
+	else
+		tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
+
+	if (old_rx_mode != tp->rx_mode)
+		tw32_f(MAC_RX_MODE, tp->rx_mode);
+
+	if (flowctrl & FLOW_CTRL_TX)
+		tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
+	else
+		tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
+
+	if (old_tx_mode != tp->tx_mode)
+		tw32_f(MAC_TX_MODE, tp->tx_mode);
+}
+
+static void tg3_adjust_link(struct net_device *dev)
+{
+	u8 oldflowctrl, linkmesg = 0;
+	u32 mac_mode, lcl_adv, rmt_adv;
+	struct tg3 *tp = netdev_priv(dev);
+	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+	spin_lock_bh(&tp->lock);
+
+	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
+				    MAC_MODE_HALF_DUPLEX);
+
+	oldflowctrl = tp->link_config.active_flowctrl;
+
+	if (phydev->link) {
+		lcl_adv = 0;
+		rmt_adv = 0;
+
+		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else if (phydev->speed == SPEED_1000 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
+			mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+
+		if (phydev->duplex == DUPLEX_HALF)
+			mac_mode |= MAC_MODE_HALF_DUPLEX;
+		else {
+			lcl_adv = tg3_advert_flowctrl_1000T(
+				  tp->link_config.flowctrl);
+
+			if (phydev->pause)
+				rmt_adv = LPA_PAUSE_CAP;
+			if (phydev->asym_pause)
+				rmt_adv |= LPA_PAUSE_ASYM;
+		}
+
+		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+	} else
+		mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+	if (mac_mode != tp->mac_mode) {
+		tp->mac_mode = mac_mode;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+		if (phydev->speed == SPEED_10)
+			tw32(MAC_MI_STAT,
+			     MAC_MI_STAT_10MBPS_MODE |
+			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+		else
+			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+	}
+
+	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
+		tw32(MAC_TX_LENGTHS,
+		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+		      (6 << TX_LENGTHS_IPG_SHIFT) |
+		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
+	else
+		tw32(MAC_TX_LENGTHS,
+		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+		      (6 << TX_LENGTHS_IPG_SHIFT) |
+		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
+
+	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
+	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
+	    phydev->speed != tp->link_config.active_speed ||
+	    phydev->duplex != tp->link_config.active_duplex ||
+	    oldflowctrl != tp->link_config.active_flowctrl)
+		linkmesg = 1;
+
+	tp->link_config.active_speed = phydev->speed;
+	tp->link_config.active_duplex = phydev->duplex;
+
+	spin_unlock_bh(&tp->lock);
+
+	if (linkmesg)
+		tg3_link_report(tp);
+}
+
+static int tg3_phy_init(struct tg3 *tp)
+{
+	struct phy_device *phydev;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
+		return 0;
+
+	/* Bring the PHY back to a known state. */
+	tg3_bmcr_reset(tp);
+
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+	/* Attach the MAC to the PHY. */
+	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
+			     phydev->dev_flags, phydev->interface);
+	if (IS_ERR(phydev)) {
+		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
+		return PTR_ERR(phydev);
+	}
+
+	/* Mask with MAC supported features. */
+	switch (phydev->interface) {
+	case PHY_INTERFACE_MODE_GMII:
+	case PHY_INTERFACE_MODE_RGMII:
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+			phydev->supported &= (PHY_GBIT_FEATURES |
+					      SUPPORTED_Pause |
+					      SUPPORTED_Asym_Pause);
+			break;
+		}
+		/* fallthru */
+	case PHY_INTERFACE_MODE_MII:
+		phydev->supported &= (PHY_BASIC_FEATURES |
+				      SUPPORTED_Pause |
+				      SUPPORTED_Asym_Pause);
+		break;
+	default:
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+		return -EINVAL;
+	}
+
+	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
+
+	phydev->advertising = phydev->supported;
+
+	return 0;
+}
+
+static void tg3_phy_start(struct tg3 *tp)
+{
+	struct phy_device *phydev;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+		return;
+
+	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+		tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+		phydev->speed = tp->link_config.orig_speed;
+		phydev->duplex = tp->link_config.orig_duplex;
+		phydev->autoneg = tp->link_config.orig_autoneg;
+		phydev->advertising = tp->link_config.orig_advertising;
+	}
+
+	phy_start(phydev);
+
+	phy_start_aneg(phydev);
+}
+
+static void tg3_phy_stop(struct tg3 *tp)
+{
+	if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+		return;
+
+	phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+}
+
+static void tg3_phy_fini(struct tg3 *tp)
+{
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+		tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
+	}
+}
+
+static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
+{
+	u32 phytest;
+
+	if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+		u32 phy;
+
+		tg3_writephy(tp, MII_TG3_FET_TEST,
+			     phytest | MII_TG3_FET_SHADOW_EN);
+		if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
+			if (enable)
+				phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
+			else
+				phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
+			tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
+		}
+		tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+	}
+}
+
+static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
+{
+	u32 reg;
+
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    (tg3_flag(tp, 5717_PLUS) &&
+	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+		return;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+		tg3_phy_fet_toggle_apd(tp, enable);
+		return;
+	}
+
+	reg = MII_TG3_MISC_SHDW_WREN |
+	      MII_TG3_MISC_SHDW_SCR5_SEL |
+	      MII_TG3_MISC_SHDW_SCR5_LPED |
+	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
+	      MII_TG3_MISC_SHDW_SCR5_SDTL |
+	      MII_TG3_MISC_SHDW_SCR5_C125OE;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
+		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
+
+	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+
+	reg = MII_TG3_MISC_SHDW_WREN |
+	      MII_TG3_MISC_SHDW_APD_SEL |
+	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
+	if (enable)
+		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
+
+	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
+}
+
+static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
+{
+	u32 phy;
+
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		return;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+		u32 ephy;
+
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
+			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     ephy | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, reg, &phy)) {
+				if (enable)
+					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+				else
+					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
+				tg3_writephy(tp, reg, phy);
+			}
+			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
+		}
+	} else {
+		int ret;
+
+		ret = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
+		if (!ret) {
+			if (enable)
+				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+			else
+				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
+			tg3_phy_auxctl_write(tp,
+					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
+		}
+	}
+}
+
+static void tg3_phy_set_wirespeed(struct tg3 *tp)
+{
+	int ret;
+	u32 val;
+
+	if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
+		return;
+
+	ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
+	if (!ret)
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
+				     val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
+}
+
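+/* Program the PHY DSP taps with the per-chip tuning values stored in
+ * one-time-programmable (OTP) memory.
+ */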
+static void tg3_phy_apply_otp(struct tg3 *tp)
+{
+	u32 otp, phy;
+
+	if (!tp->phy_otp)
+		return;
+
+	otp = tp->phy_otp;
+
+	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
+		return;
+
+	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
+	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
+	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
+
+	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
+	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
+
+	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
+	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
+	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
+
+	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
+
+	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
+
+	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
+	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
+	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
+
+	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+}
+
+static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
+{
+	u32 val;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+		return;
+
+	tp->setlpicnt = 0;
+
+	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+	    current_link_up == 1 &&
+	    tp->link_config.active_duplex == DUPLEX_FULL &&
+	    (tp->link_config.active_speed == SPEED_100 ||
+	     tp->link_config.active_speed == SPEED_1000)) {
+		u32 eeectl;
+
+		if (tp->link_config.active_speed == SPEED_1000)
+			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
+		else
+			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
+
+		tw32(TG3_CPMU_EEE_CTRL, eeectl);
+
+		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
+				  TG3_CL45_D7_EEERES_STAT, &val);
+
+		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
+		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
+			tp->setlpicnt = 2;
+	}
+
+	if (!tp->setlpicnt) {
+		if (current_link_up == 1 &&
+		   !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
+			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		}
+
+		val = tr32(TG3_CPMU_EEE_MODE);
+		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+	}
+}
+
+static void tg3_phy_eee_enable(struct tg3 *tp)
+{
+	u32 val;
+
+	if (tp->link_config.active_speed == SPEED_1000 &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+		val = MII_TG3_DSP_TAP26_ALNOKO |
+		      MII_TG3_DSP_TAP26_RMRXSTO;
+		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+	}
+
+	val = tr32(TG3_CPMU_EEE_MODE);
+	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
+}
+
+static int tg3_wait_macro_done(struct tg3 *tp)
+{
+	int limit = 100;
+
+	while (limit--) {
+		u32 tmp32;
+
+		if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
+			if ((tmp32 & 0x1000) == 0)
+				break;
+		}
+	}
+	if (limit < 0)
+		return -EBUSY;
+
+	return 0;
+}
+
+static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
+{
+	static const u32 test_pat[4][6] = {
+	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
+	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
+	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
+	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
+	};
+	int chan;
+
+	for (chan = 0; chan < 4; chan++) {
+		int i;
+
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+			     (chan * 0x2000) | 0x0200);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+
+		for (i = 0; i < 6; i++)
+			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
+				     test_pat[chan][i]);
+
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+		if (tg3_wait_macro_done(tp)) {
+			*resetp = 1;
+			return -EBUSY;
+		}
+
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+			     (chan * 0x2000) | 0x0200);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
+		if (tg3_wait_macro_done(tp)) {
+			*resetp = 1;
+			return -EBUSY;
+		}
+
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
+		if (tg3_wait_macro_done(tp)) {
+			*resetp = 1;
+			return -EBUSY;
+		}
+
+		for (i = 0; i < 6; i += 2) {
+			u32 low, high;
+
+			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
+			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
+			    tg3_wait_macro_done(tp)) {
+				*resetp = 1;
+				return -EBUSY;
+			}
+			low &= 0x7fff;
+			high &= 0x000f;
+			if (low != test_pat[chan][i] ||
+			    high != test_pat[chan][i+1]) {
+				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
+
+				return -EBUSY;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int tg3_phy_reset_chanpat(struct tg3 *tp)
+{
+	int chan;
+
+	for (chan = 0; chan < 4; chan++) {
+		int i;
+
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+			     (chan * 0x2000) | 0x0200);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
+		for (i = 0; i < 6; i++)
+			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
+		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
+		if (tg3_wait_macro_done(tp))
+			return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
+{
+	u32 reg32, phy9_orig;
+	int retries, do_phy_reset, err;
+
+	retries = 10;
+	do_phy_reset = 1;
+	do {
+		if (do_phy_reset) {
+			err = tg3_bmcr_reset(tp);
+			if (err)
+				return err;
+			do_phy_reset = 0;
+		}
+
+		/* Disable transmitter and interrupt.  */
+		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
+			continue;
+
+		reg32 |= 0x3000;
+		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+		/* Set full-duplex, 1000 mbps.  */
+		tg3_writephy(tp, MII_BMCR,
+			     BMCR_FULLDPLX | BMCR_SPEED1000);
+
+		/* Set to master mode.  */
+		if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
+			continue;
+
+		tg3_writephy(tp, MII_CTRL1000,
+			     CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
+
+		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+		if (err)
+			return err;
+
+		/* Block the PHY control access.  */
+		tg3_phydsp_write(tp, 0x8005, 0x0800);
+
+		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
+		if (!err)
+			break;
+	} while (--retries);
+
+	err = tg3_phy_reset_chanpat(tp);
+	if (err)
+		return err;
+
+	tg3_phydsp_write(tp, 0x8005, 0x0000);
+
+	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
+	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
+
+	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+
+	tg3_writephy(tp, MII_CTRL1000, phy9_orig);
+
+	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
+		reg32 &= ~0x3000;
+		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+	} else if (!err)
+		err = -EBUSY;
+
+	return err;
+}
+
+/* This will reset the tigon3 PHY and reapply the chip-specific
+ * workarounds needed to bring it back to an operational state.
+ */
+static int tg3_phy_reset(struct tg3 *tp)
+{
+	u32 val, cpmuctrl;
+	int err;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		val = tr32(GRC_MISC_CFG);
+		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
+		udelay(40);
+	}
+	err  = tg3_readphy(tp, MII_BMSR, &val);
+	err |= tg3_readphy(tp, MII_BMSR, &val);
+	if (err != 0)
+		return -EBUSY;
+
+	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
+		netif_carrier_off(tp->dev);
+		tg3_link_report(tp);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+		err = tg3_phy_reset_5703_4_5(tp);
+		if (err)
+			return err;
+		goto out;
+	}
+
+	cpmuctrl = 0;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+		cpmuctrl = tr32(TG3_CPMU_CTRL);
+		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
+			tw32(TG3_CPMU_CTRL,
+			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
+	}
+
+	err = tg3_bmcr_reset(tp);
+	if (err)
+		return err;
+
+	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
+		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
+		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
+
+		tw32(TG3_CPMU_CTRL, cpmuctrl);
+	}
+
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
+		    CPMU_LSPD_1000MB_MACCLK_12_5) {
+			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+			udelay(40);
+			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+		}
+	}
+
+	if (tg3_flag(tp, 5717_PLUS) &&
+	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
+		return 0;
+
+	tg3_phy_apply_otp(tp);
+
+	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+		tg3_phy_toggle_apd(tp, true);
+	else
+		tg3_phy_toggle_apd(tp, false);
+
+out:
+	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
+	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
+		tg3_phydsp_write(tp, 0x000a, 0x0323);
+		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+	}
+
+	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+	}
+
+	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
+		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+			tg3_phydsp_write(tp, 0x000a, 0x310b);
+			tg3_phydsp_write(tp, 0x201f, 0x9506);
+			tg3_phydsp_write(tp, 0x401f, 0x14e2);
+			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		}
+	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
+		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
+			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
+			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
+				tg3_writephy(tp, MII_TG3_TEST1,
+					     MII_TG3_TEST1_TRIM_EN | 0x4);
+			} else
+				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
+
+			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		}
+	}
+
+	/* Set Extended packet length bit (bit 14) on all chips that
+	 * support jumbo frames.
+	 */
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+		/* Cannot do read-modify-write on 5401 */
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
+		/* Set bit 14 with read-modify-write to preserve other bits */
+		err = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
+		if (!err)
+			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
+					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
+	}
+
+	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
+	 * jumbo frame transmission.
+	 */
+	if (tg3_flag(tp, JUMBO_CAPABLE)) {
+		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		/* adjust output voltage */
+		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
+	}
+
+	tg3_phy_toggle_automdix(tp, 1);
+	tg3_phy_set_wirespeed(tp);
+	return 0;
+}
+
+#define TG3_GPIO_MSG_DRVR_PRES		 0x00000001
+#define TG3_GPIO_MSG_NEED_VAUX		 0x00000002
+#define TG3_GPIO_MSG_MASK		 (TG3_GPIO_MSG_DRVR_PRES | \
+					  TG3_GPIO_MSG_NEED_VAUX)
+#define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
+	((TG3_GPIO_MSG_DRVR_PRES << 0) | \
+	 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
+	 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
+	 (TG3_GPIO_MSG_DRVR_PRES << 12))
+
+#define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
+	((TG3_GPIO_MSG_NEED_VAUX << 0) | \
+	 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
+	 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
+	 (TG3_GPIO_MSG_NEED_VAUX << 12))
+
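+/* Each PCI function owns a 4-bit field in the GPIO message word; update
+ * this function's field and return the updated set of fields.
+ */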
+static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
+{
+	u32 status, shift;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+		status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
+	else
+		status = tr32(TG3_CPMU_DRV_STATUS);
+
+	shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
+	status &= ~(TG3_GPIO_MSG_MASK << shift);
+	status |= (newstat << shift);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+		tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
+	else
+		tw32(TG3_CPMU_DRV_STATUS, status);
+
+	return status >> TG3_APE_GPIO_MSG_SHIFT;
+}
+
+static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, IS_NIC))
+		return 0;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+		if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+			return -EIO;
+
+		tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
+
+		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+	} else {
+		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	}
+
+	return 0;
+}
+
+static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
+{
+	u32 grc_local_ctrl;
+
+	if (!tg3_flag(tp, IS_NIC) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
+		return;
+
+	grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+	tw32_wait_f(GRC_LOCAL_CTRL,
+		    grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
+		    TG3_GRC_LCLCTL_PWRSW_DELAY);
+}
+
+static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
+{
+	if (!tg3_flag(tp, IS_NIC))
+		return;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+			    (GRC_LCLCTRL_GPIO_OE0 |
+			     GRC_LCLCTRL_GPIO_OE1 |
+			     GRC_LCLCTRL_GPIO_OE2 |
+			     GRC_LCLCTRL_GPIO_OUTPUT0 |
+			     GRC_LCLCTRL_GPIO_OUTPUT1),
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+		   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+		/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
+		u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
+				     GRC_LCLCTRL_GPIO_OE1 |
+				     GRC_LCLCTRL_GPIO_OE2 |
+				     GRC_LCLCTRL_GPIO_OUTPUT0 |
+				     GRC_LCLCTRL_GPIO_OUTPUT1 |
+				     tp->grc_local_ctrl;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
+		tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+	} else {
+		u32 no_gpio2;
+		u32 grc_local_ctrl = 0;
+
+		/* Workaround to prevent overdrawing current. */
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
+				    grc_local_ctrl,
+				    TG3_GRC_LCLCTL_PWRSW_DELAY);
+		}
+
+		/* On 5753 and variants, GPIO2 cannot be used. */
+		no_gpio2 = tp->nic_sram_data_cfg &
+			   NIC_SRAM_DATA_CFG_NO_GPIO2;
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+				  GRC_LCLCTRL_GPIO_OE1 |
+				  GRC_LCLCTRL_GPIO_OE2 |
+				  GRC_LCLCTRL_GPIO_OUTPUT1 |
+				  GRC_LCLCTRL_GPIO_OUTPUT2;
+		if (no_gpio2) {
+			grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
+					    GRC_LCLCTRL_GPIO_OUTPUT2);
+		}
+		tw32_wait_f(GRC_LOCAL_CTRL,
+			    tp->grc_local_ctrl | grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
+
+		tw32_wait_f(GRC_LOCAL_CTRL,
+			    tp->grc_local_ctrl | grc_local_ctrl,
+			    TG3_GRC_LCLCTL_PWRSW_DELAY);
+
+		if (!no_gpio2) {
+			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
+			tw32_wait_f(GRC_LOCAL_CTRL,
+				    tp->grc_local_ctrl | grc_local_ctrl,
+				    TG3_GRC_LCLCTL_PWRSW_DELAY);
+		}
+	}
+}
+
+static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
+{
+	u32 msg = 0;
+
+	/* Serialize power state transitions */
+	if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
+		return;
+
+	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
+		msg = TG3_GPIO_MSG_NEED_VAUX;
+
+	msg = tg3_set_function_status(tp, msg);
+
+	if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
+		goto done;
+
+	if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
+		tg3_pwrsrc_switch_to_vaux(tp);
+	else
+		tg3_pwrsrc_die_with_vmain(tp);
+
+done:
+	tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
+}
+
+static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
+{
+	bool need_vaux = false;
+
+	/* The GPIOs do something completely different on 57765. */
+	if (!tg3_flag(tp, IS_NIC) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		return;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+		tg3_frob_aux_power_5717(tp, include_wol ?
+					tg3_flag(tp, WOL_ENABLE) != 0 : 0);
+		return;
+	}
+
+	if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
+		struct net_device *dev_peer;
+
+		dev_peer = pci_get_drvdata(tp->pdev_peer);
+
+		/* remove_one() may have been run on the peer. */
+		if (dev_peer) {
+			struct tg3 *tp_peer = netdev_priv(dev_peer);
+
+			if (tg3_flag(tp_peer, INIT_COMPLETE))
+				return;
+
+			if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
+			    tg3_flag(tp_peer, ENABLE_ASF))
+				need_vaux = true;
+		}
+	}
+
+	if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
+	    tg3_flag(tp, ENABLE_ASF))
+		need_vaux = true;
+
+	if (need_vaux)
+		tg3_pwrsrc_switch_to_vaux(tp);
+	else
+		tg3_pwrsrc_die_with_vmain(tp);
+}
+
+static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
+{
+	if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
+		return 1;
+	else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
+		if (speed != SPEED_10)
+			return 1;
+	} else if (speed == SPEED_10)
+		return 1;
+
+	return 0;
+}
+
+static int tg3_setup_phy(struct tg3 *, int);
+
+#define RESET_KIND_SHUTDOWN	0
+#define RESET_KIND_INIT		1
+#define RESET_KIND_SUSPEND	2
+
+static void tg3_write_sig_post_reset(struct tg3 *, int);
+static int tg3_halt_cpu(struct tg3 *, u32);
+
+static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
+{
+	u32 val;
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
+			u32 serdes_cfg = tr32(MAC_SERDES_CFG);
+
+			sg_dig_ctrl |=
+				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
+			tw32(SG_DIG_CTRL, sg_dig_ctrl);
+			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
+		}
+		return;
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		tg3_bmcr_reset(tp);
+		val = tr32(GRC_MISC_CFG);
+		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
+		udelay(40);
+		return;
+	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+		u32 phytest;
+		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
+			u32 phy;
+
+			tg3_writephy(tp, MII_ADVERTISE, 0);
+			tg3_writephy(tp, MII_BMCR,
+				     BMCR_ANENABLE | BMCR_ANRESTART);
+
+			tg3_writephy(tp, MII_TG3_FET_TEST,
+				     phytest | MII_TG3_FET_SHADOW_EN);
+			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
+				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
+				tg3_writephy(tp,
+					     MII_TG3_FET_SHDW_AUXMODE4,
+					     phy);
+			}
+			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
+		}
+		return;
+	} else if (do_low_power) {
+		tg3_writephy(tp, MII_TG3_EXT_CTRL,
+			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);
+
+		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
+		      MII_TG3_AUXCTL_PCTL_VREG_11V;
+		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
+	}
+
+	/* The PHY should not be powered down on some chips because
+	 * of bugs.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
+	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
+		return;
+
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
+	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
+		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
+		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
+		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
+		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
+	}
+
+	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
+}
+
+/* tp->lock is held. */
+static int tg3_nvram_lock(struct tg3 *tp)
+{
+	if (tg3_flag(tp, NVRAM)) {
+		int i;
+
+		if (tp->nvram_lock_cnt == 0) {
+			tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+			for (i = 0; i < 8000; i++) {
+				if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+					break;
+				udelay(20);
+			}
+			if (i == 8000) {
+				tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
+				return -ENODEV;
+			}
+		}
+		tp->nvram_lock_cnt++;
+	}
+	return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_nvram_unlock(struct tg3 *tp)
+{
+	if (tg3_flag(tp, NVRAM)) {
+		if (tp->nvram_lock_cnt > 0)
+			tp->nvram_lock_cnt--;
+		if (tp->nvram_lock_cnt == 0)
+			tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_enable_nvram_access(struct tg3 *tp)
+{
+	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+		u32 nvaccess = tr32(NVRAM_ACCESS);
+
+		tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_disable_nvram_access(struct tg3 *tp)
+{
+	if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
+		u32 nvaccess = tr32(NVRAM_ACCESS);
+
+		tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
+	}
+}
+
+static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
+					u32 offset, u32 *val)
+{
+	u32 tmp;
+	int i;
+
+	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
+		return -EINVAL;
+
+	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
+					EEPROM_ADDR_DEVID_MASK |
+					EEPROM_ADDR_READ);
+	tw32(GRC_EEPROM_ADDR,
+	     tmp |
+	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
+	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
+	      EEPROM_ADDR_ADDR_MASK) |
+	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
+
+	for (i = 0; i < 1000; i++) {
+		tmp = tr32(GRC_EEPROM_ADDR);
+
+		if (tmp & EEPROM_ADDR_COMPLETE)
+			break;
+		msleep(1);
+	}
+	if (!(tmp & EEPROM_ADDR_COMPLETE))
+		return -EBUSY;
+
+	tmp = tr32(GRC_EEPROM_DATA);
+
+	/*
+	 * The data will always be opposite the native endian
+	 * format.  Perform a blind byteswap to compensate.
+	 */
+	*val = swab32(tmp);
+
+	return 0;
+}
+
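+/* 10000 polls x 10 usec = roughly 100 ms worst case. */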
+#define NVRAM_CMD_TIMEOUT 10000
+
+static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
+{
+	int i;
+
+	tw32(NVRAM_CMD, nvram_cmd);
+	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
+		udelay(10);
+		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
+			udelay(10);
+			break;
+		}
+	}
+
+	if (i == NVRAM_CMD_TIMEOUT)
+		return -EBUSY;
+
+	return 0;
+}
+
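+/* Atmel AT45DB0X1B parts use a page size that is not a power of two,
+ * so linear addresses must be converted to page/offset form (and back,
+ * below) before being handed to the flash.
+ */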
+static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
+{
+	if (tg3_flag(tp, NVRAM) &&
+	    tg3_flag(tp, NVRAM_BUFFERED) &&
+	    tg3_flag(tp, FLASH) &&
+	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+	    (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+		addr = ((addr / tp->nvram_pagesize) <<
+			ATMEL_AT45DB0X1B_PAGE_POS) +
+		       (addr % tp->nvram_pagesize);
+
+	return addr;
+}
+
+static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
+{
+	if (tg3_flag(tp, NVRAM) &&
+	    tg3_flag(tp, NVRAM_BUFFERED) &&
+	    tg3_flag(tp, FLASH) &&
+	    !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
+	    (tp->nvram_jedecnum == JEDEC_ATMEL))
+
+		addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
+			tp->nvram_pagesize) +
+		       (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
+
+	return addr;
+}
+
+/* NOTE: Data read in from NVRAM is byteswapped according to
+ * the byteswapping settings for all other register accesses.
+ * tg3 devices are BE devices, so on a BE machine, the data
+ * returned will be exactly as it is seen in NVRAM.  On a LE
+ * machine, the 32-bit value will be byteswapped.
+ */
+static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+	int ret;
+
+	if (!tg3_flag(tp, NVRAM))
+		return tg3_nvram_read_using_eeprom(tp, offset, val);
+
+	offset = tg3_nvram_phys_addr(tp, offset);
+
+	if (offset > NVRAM_ADDR_MSK)
+		return -EINVAL;
+
+	ret = tg3_nvram_lock(tp);
+	if (ret)
+		return ret;
+
+	tg3_enable_nvram_access(tp);
+
+	tw32(NVRAM_ADDR, offset);
+	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
+		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
+
+	if (ret == 0)
+		*val = tr32(NVRAM_RDDATA);
+
+	tg3_disable_nvram_access(tp);
+
+	tg3_nvram_unlock(tp);
+
+	return ret;
+}
+
+/* Ensures NVRAM data is in bytestream format. */
+static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
+{
+	u32 v;
+	int res = tg3_nvram_read(tp, offset, &v);
+	if (!res)
+		*val = cpu_to_be32(v);
+	return res;
+}
+
+/* tp->lock is held. */
+static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
+{
+	u32 addr_high, addr_low;
+	int i;
+
+	addr_high = ((tp->dev->dev_addr[0] << 8) |
+		     tp->dev->dev_addr[1]);
+	addr_low = ((tp->dev->dev_addr[2] << 24) |
+		    (tp->dev->dev_addr[3] << 16) |
+		    (tp->dev->dev_addr[4] <<  8) |
+		    (tp->dev->dev_addr[5] <<  0));
+	for (i = 0; i < 4; i++) {
+		if (i == 1 && skip_mac_1)
+			continue;
+		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
+		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+		for (i = 0; i < 12; i++) {
+			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
+			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
+		}
+	}
+
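+	/* Seed the transmit backoff generator from the MAC address,
+	 * presumably so that different NICs choose different backoff
+	 * sequences.
+	 */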
+	addr_high = (tp->dev->dev_addr[0] +
+		     tp->dev->dev_addr[1] +
+		     tp->dev->dev_addr[2] +
+		     tp->dev->dev_addr[3] +
+		     tp->dev->dev_addr[4] +
+		     tp->dev->dev_addr[5]) &
+		TX_BACKOFF_SEED_MASK;
+	tw32(MAC_TX_BACKOFF_SEED, addr_high);
+}
+
+static void tg3_enable_register_access(struct tg3 *tp)
+{
+	/*
+	 * Make sure register accesses (indirect or otherwise) will function
+	 * correctly.
+	 */
+	pci_write_config_dword(tp->pdev,
+			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
+}
+
+static int tg3_power_up(struct tg3 *tp)
+{
+	int err;
+
+	tg3_enable_register_access(tp);
+
+	err = pci_set_power_state(tp->pdev, PCI_D0);
+	if (!err) {
+		/* Switch out of Vaux if it is a NIC */
+		tg3_pwrsrc_switch_to_vmain(tp);
+	} else {
+		netdev_err(tp->dev, "Transition to D0 failed\n");
+	}
+
+	return err;
+}
+
+static int tg3_power_down_prepare(struct tg3 *tp)
+{
+	u32 misc_host_ctrl;
+	bool device_should_wake, do_low_power;
+
+	tg3_enable_register_access(tp);
+
+	/* Restore the CLKREQ setting. */
+	if (tg3_flag(tp, CLKREQ_BUG)) {
+		u16 lnkctl;
+
+		pci_read_config_word(tp->pdev,
+				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+				     &lnkctl);
+		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
+		pci_write_config_word(tp->pdev,
+				      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+				      lnkctl);
+	}
+
+	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+	tw32(TG3PCI_MISC_HOST_CTRL,
+	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
+
+	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
+			     tg3_flag(tp, WOL_ENABLE);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		do_low_power = false;
+		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
+		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+			struct phy_device *phydev;
+			u32 phyid, advertising;
+
+			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+
+			tp->link_config.orig_speed = phydev->speed;
+			tp->link_config.orig_duplex = phydev->duplex;
+			tp->link_config.orig_autoneg = phydev->autoneg;
+			tp->link_config.orig_advertising = phydev->advertising;
+
+			advertising = ADVERTISED_TP |
+				      ADVERTISED_Pause |
+				      ADVERTISED_Autoneg |
+				      ADVERTISED_10baseT_Half;
+
+			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
+				if (tg3_flag(tp, WOL_SPEED_100MB))
+					advertising |=
+						ADVERTISED_100baseT_Half |
+						ADVERTISED_100baseT_Full |
+						ADVERTISED_10baseT_Full;
+				else
+					advertising |= ADVERTISED_10baseT_Full;
+			}
+
+			phydev->advertising = advertising;
+
+			phy_start_aneg(phydev);
+
+			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
+			if (phyid != PHY_ID_BCMAC131) {
+				phyid &= PHY_BCM_OUI_MASK;
+				if (phyid == PHY_BCM_OUI_1 ||
+				    phyid == PHY_BCM_OUI_2 ||
+				    phyid == PHY_BCM_OUI_3)
+					do_low_power = true;
+			}
+		}
+	} else {
+		do_low_power = true;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
+			tp->link_config.orig_speed = tp->link_config.speed;
+			tp->link_config.orig_duplex = tp->link_config.duplex;
+			tp->link_config.orig_autoneg = tp->link_config.autoneg;
+		}
+
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+			tp->link_config.speed = SPEED_10;
+			tp->link_config.duplex = DUPLEX_HALF;
+			tp->link_config.autoneg = AUTONEG_ENABLE;
+			tg3_setup_phy(tp, 0);
+		}
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		u32 val;
+
+		val = tr32(GRC_VCPU_EXT_CTRL);
+		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
+	} else if (!tg3_flag(tp, ENABLE_ASF)) {
+		int i;
+		u32 val;
+
+		for (i = 0; i < 200; i++) {
+			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
+			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+				break;
+			msleep(1);
+		}
+	}
+	if (tg3_flag(tp, WOL_CAP))
+		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
+						     WOL_DRV_STATE_SHUTDOWN |
+						     WOL_DRV_WOL |
+						     WOL_SET_MAGIC_PKT);
+
+	if (device_should_wake) {
+		u32 mac_mode;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+			if (do_low_power &&
+			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+				tg3_phy_auxctl_write(tp,
+					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
+					       MII_TG3_AUXCTL_PCTL_WOL_EN |
+					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
+					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
+				udelay(40);
+			}
+
+			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+				mac_mode = MAC_MODE_PORT_MODE_GMII;
+			else
+				mac_mode = MAC_MODE_PORT_MODE_MII;
+
+			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5700) {
+				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
+					     SPEED_100 : SPEED_10;
+				if (tg3_5700_link_polarity(tp, speed))
+					mac_mode |= MAC_MODE_LINK_POLARITY;
+				else
+					mac_mode &= ~MAC_MODE_LINK_POLARITY;
+			}
+		} else {
+			mac_mode = MAC_MODE_PORT_MODE_TBI;
+		}
+
+		if (!tg3_flag(tp, 5750_PLUS))
+			tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
+		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
+		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
+			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
+
+		if (tg3_flag(tp, ENABLE_APE))
+			mac_mode |= MAC_MODE_APE_TX_EN |
+				    MAC_MODE_APE_RX_EN |
+				    MAC_MODE_TDE_ENABLE;
+
+		tw32_f(MAC_MODE, mac_mode);
+		udelay(100);
+
+		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
+		udelay(10);
+	}
+
+	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+		u32 base_val;
+
+		base_val = tp->pci_clock_ctrl;
+		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
+			     CLOCK_CTRL_TXCLK_DISABLE);
+
+		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
+			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
+	} else if (tg3_flag(tp, 5780_CLASS) ||
+		   tg3_flag(tp, CPMU_PRESENT) ||
+		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		/* do nothing */
+	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
+		u32 newbits1, newbits2;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
+				    CLOCK_CTRL_TXCLK_DISABLE |
+				    CLOCK_CTRL_ALTCLK);
+			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+		} else if (tg3_flag(tp, 5705_PLUS)) {
+			newbits1 = CLOCK_CTRL_625_CORE;
+			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
+		} else {
+			newbits1 = CLOCK_CTRL_ALTCLK;
+			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
+		}
+
+		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
+			    40);
+
+		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
+			    40);
+
+		if (!tg3_flag(tp, 5705_PLUS)) {
+			u32 newbits3;
+
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
+					    CLOCK_CTRL_TXCLK_DISABLE |
+					    CLOCK_CTRL_44MHZ_CORE);
+			} else {
+				newbits3 = CLOCK_CTRL_44MHZ_CORE;
+			}
+
+			tw32_wait_f(TG3PCI_CLOCK_CTRL,
+				    tp->pci_clock_ctrl | newbits3, 40);
+		}
+	}
+
+	if (!device_should_wake && !tg3_flag(tp, ENABLE_ASF))
+		tg3_power_down_phy(tp, do_low_power);
+
+	tg3_frob_aux_power(tp, true);
+
+	/* Workaround for unstable PLL clock */
+	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
+	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
+		u32 val = tr32(0x7d00);
+
+		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
+		tw32(0x7d00, val);
+		if (!tg3_flag(tp, ENABLE_ASF)) {
+			int err;
+
+			err = tg3_nvram_lock(tp);
+			tg3_halt_cpu(tp, RX_CPU_BASE);
+			if (!err)
+				tg3_nvram_unlock(tp);
+		}
+	}
+
+	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
+
+	return 0;
+}
+
+static void tg3_power_down(struct tg3 *tp)
+{
+	tg3_power_down_prepare(tp);
+
+	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
+	pci_set_power_state(tp->pdev, PCI_D3hot);
+}
+
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+{
+	switch (val & MII_TG3_AUX_STAT_SPDMASK) {
+	case MII_TG3_AUX_STAT_10HALF:
+		*speed = SPEED_10;
+		*duplex = DUPLEX_HALF;
+		break;
+
+	case MII_TG3_AUX_STAT_10FULL:
+		*speed = SPEED_10;
+		*duplex = DUPLEX_FULL;
+		break;
+
+	case MII_TG3_AUX_STAT_100HALF:
+		*speed = SPEED_100;
+		*duplex = DUPLEX_HALF;
+		break;
+
+	case MII_TG3_AUX_STAT_100FULL:
+		*speed = SPEED_100;
+		*duplex = DUPLEX_FULL;
+		break;
+
+	case MII_TG3_AUX_STAT_1000HALF:
+		*speed = SPEED_1000;
+		*duplex = DUPLEX_HALF;
+		break;
+
+	case MII_TG3_AUX_STAT_1000FULL:
+		*speed = SPEED_1000;
+		*duplex = DUPLEX_FULL;
+		break;
+
+	default:
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+			*speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
+				 SPEED_10;
+			*duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
+				  DUPLEX_HALF;
+			break;
+		}
+		*speed = SPEED_INVALID;
+		*duplex = DUPLEX_INVALID;
+		break;
+	}
+}
+
+static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
+{
+	int err = 0;
+	u32 val, new_adv;
+
+	new_adv = ADVERTISE_CSMA;
+	if (advertise & ADVERTISED_10baseT_Half)
+		new_adv |= ADVERTISE_10HALF;
+	if (advertise & ADVERTISED_10baseT_Full)
+		new_adv |= ADVERTISE_10FULL;
+	if (advertise & ADVERTISED_100baseT_Half)
+		new_adv |= ADVERTISE_100HALF;
+	if (advertise & ADVERTISED_100baseT_Full)
+		new_adv |= ADVERTISE_100FULL;
+
+	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+
+	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
+	if (err)
+		goto done;
+
+	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+		goto done;
+
+	new_adv = 0;
+	if (advertise & ADVERTISED_1000baseT_Half)
+		new_adv |= ADVERTISE_1000HALF;
+	if (advertise & ADVERTISED_1000baseT_Full)
+		new_adv |= ADVERTISE_1000FULL;
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+		new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+
+	err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+	if (err)
+		goto done;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
+		goto done;
+
+	tw32(TG3_CPMU_EEE_MODE,
+	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
+
+	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
+	if (!err) {
+		u32 err2;
+
+		val = 0;
+		/* Advertise 100BASE-TX EEE ability */
+		if (advertise & ADVERTISED_100baseT_Full)
+			val |= MDIO_AN_EEE_ADV_100TX;
+		/* Advertise 1000-BaseT EEE ability */
+		if (advertise & ADVERTISED_1000baseT_Full)
+			val |= MDIO_AN_EEE_ADV_1000T;
+		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
+		if (err)
+			val = 0;
+
+		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
+		case ASIC_REV_5717:
+		case ASIC_REV_57765:
+		case ASIC_REV_5719:
+			/* If we advertised any EEE abilities above... */
+			if (val)
+				val = MII_TG3_DSP_TAP26_ALNOKO |
+				      MII_TG3_DSP_TAP26_RMRXSTO |
+				      MII_TG3_DSP_TAP26_OPCSINPT;
+			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
+			/* Fall through */
+		case ASIC_REV_5720:
+			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
+				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
+						 MII_TG3_DSP_CH34TP2_HIBW01);
+		}
+
+		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
+		if (!err)
+			err = err2;
+	}
+
+done:
+	return err;
+}
+
+static void tg3_phy_copper_begin(struct tg3 *tp)
+{
+	u32 new_adv;
+	int i;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+		new_adv = ADVERTISED_10baseT_Half |
+			  ADVERTISED_10baseT_Full;
+		if (tg3_flag(tp, WOL_SPEED_100MB))
+			new_adv |= ADVERTISED_100baseT_Half |
+				   ADVERTISED_100baseT_Full;
+
+		tg3_phy_autoneg_cfg(tp, new_adv,
+				    FLOW_CTRL_TX | FLOW_CTRL_RX);
+	} else if (tp->link_config.speed == SPEED_INVALID) {
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+			tp->link_config.advertising &=
+				~(ADVERTISED_1000baseT_Half |
+				  ADVERTISED_1000baseT_Full);
+
+		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
+				    tp->link_config.flowctrl);
+	} else {
+		/* Asking for a specific link mode. */
+		if (tp->link_config.speed == SPEED_1000) {
+			if (tp->link_config.duplex == DUPLEX_FULL)
+				new_adv = ADVERTISED_1000baseT_Full;
+			else
+				new_adv = ADVERTISED_1000baseT_Half;
+		} else if (tp->link_config.speed == SPEED_100) {
+			if (tp->link_config.duplex == DUPLEX_FULL)
+				new_adv = ADVERTISED_100baseT_Full;
+			else
+				new_adv = ADVERTISED_100baseT_Half;
+		} else {
+			if (tp->link_config.duplex == DUPLEX_FULL)
+				new_adv = ADVERTISED_10baseT_Full;
+			else
+				new_adv = ADVERTISED_10baseT_Half;
+		}
+
+		tg3_phy_autoneg_cfg(tp, new_adv,
+				    tp->link_config.flowctrl);
+	}
+
+	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
+	    tp->link_config.speed != SPEED_INVALID) {
+		u32 bmcr, orig_bmcr;
+
+		tp->link_config.active_speed = tp->link_config.speed;
+		tp->link_config.active_duplex = tp->link_config.duplex;
+
+		bmcr = 0;
+		switch (tp->link_config.speed) {
+		default:
+		case SPEED_10:
+			break;
+
+		case SPEED_100:
+			bmcr |= BMCR_SPEED100;
+			break;
+
+		case SPEED_1000:
+			bmcr |= BMCR_SPEED1000;
+			break;
+		}
+
+		if (tp->link_config.duplex == DUPLEX_FULL)
+			bmcr |= BMCR_FULLDPLX;
+
+		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
+		    (bmcr != orig_bmcr)) {
+			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
+			for (i = 0; i < 1500; i++) {
+				u32 tmp;
+
+				udelay(10);
+				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
+				    tg3_readphy(tp, MII_BMSR, &tmp))
+					continue;
+				if (!(tmp & BMSR_LSTATUS)) {
+					udelay(40);
+					break;
+				}
+			}
+			tg3_writephy(tp, MII_BMCR, bmcr);
+			udelay(40);
+		}
+	} else {
+		tg3_writephy(tp, MII_BMCR,
+			     BMCR_ANENABLE | BMCR_ANRESTART);
+	}
+}
+
+static int tg3_init_5401phy_dsp(struct tg3 *tp)
+{
+	int err;
+
+	/* Turn off tap power management and set the
+	 * extended packet length bit.
+	 */
+	err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
+
+	err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
+	err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
+	err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
+	err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
+	err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
+
+	udelay(40);
+
+	return err;
+}
+
+static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+{
+	u32 adv_reg, all_mask = 0;
+
+	if (mask & ADVERTISED_10baseT_Half)
+		all_mask |= ADVERTISE_10HALF;
+	if (mask & ADVERTISED_10baseT_Full)
+		all_mask |= ADVERTISE_10FULL;
+	if (mask & ADVERTISED_100baseT_Half)
+		all_mask |= ADVERTISE_100HALF;
+	if (mask & ADVERTISED_100baseT_Full)
+		all_mask |= ADVERTISE_100FULL;
+
+	if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
+		return 0;
+
+	if ((adv_reg & all_mask) != all_mask)
+		return 0;
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+		u32 tg3_ctrl;
+
+		all_mask = 0;
+		if (mask & ADVERTISED_1000baseT_Half)
+			all_mask |= ADVERTISE_1000HALF;
+		if (mask & ADVERTISED_1000baseT_Full)
+			all_mask |= ADVERTISE_1000FULL;
+
+		if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
+			return 0;
+
+		if ((tg3_ctrl & all_mask) != all_mask)
+			return 0;
+	}
+	return 1;
+}
+
+static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+{
+	u32 curadv, reqadv;
+
+	if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+		return 1;
+
+	curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+	reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+
+	if (tp->link_config.active_duplex == DUPLEX_FULL) {
+		if (curadv != reqadv)
+			return 0;
+
+		if (tg3_flag(tp, PAUSE_AUTONEG))
+			tg3_readphy(tp, MII_LPA, rmtadv);
+	} else {
+		/* Reprogram the advertisement register, even if it
+		 * does not affect the current link.  If the link
+		 * gets renegotiated in the future, we can save an
+		 * additional renegotiation cycle by advertising
+		 * it correctly in the first place.
+		 */
+		if (curadv != reqadv) {
+			*lcladv &= ~(ADVERTISE_PAUSE_CAP |
+				     ADVERTISE_PAUSE_ASYM);
+			tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
+		}
+	}
+
+	return 1;
+}
+
+static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
+{
+	int current_link_up;
+	u32 bmsr, val;
+	u32 lcl_adv, rmt_adv;
+	u16 current_speed;
+	u8 current_duplex;
+	int i, err;
+
+	tw32(MAC_EVENT, 0);
+
+	tw32_f(MAC_STATUS,
+	     (MAC_STATUS_SYNC_CHANGED |
+	      MAC_STATUS_CFG_CHANGED |
+	      MAC_STATUS_MI_COMPLETION |
+	      MAC_STATUS_LNKSTATE_CHANGED));
+	udelay(40);
+
+	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
+		tw32_f(MAC_MI_MODE,
+		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
+		udelay(80);
+	}
+
+	tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
+
+	/* Some third-party PHYs need to be reset on link going
+	 * down.
+	 */
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
+	    netif_carrier_ok(tp->dev)) {
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+		    !(bmsr & BMSR_LSTATUS))
+			force_reset = 1;
+	}
+	if (force_reset)
+		tg3_phy_reset(tp);
+
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
+		    !tg3_flag(tp, INIT_COMPLETE))
+			bmsr = 0;
+
+		if (!(bmsr & BMSR_LSTATUS)) {
+			err = tg3_init_5401phy_dsp(tp);
+			if (err)
+				return err;
+
+			tg3_readphy(tp, MII_BMSR, &bmsr);
+			for (i = 0; i < 1000; i++) {
+				udelay(10);
+				if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+				    (bmsr & BMSR_LSTATUS)) {
+					udelay(40);
+					break;
+				}
+			}
+
+			if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
+			    TG3_PHY_REV_BCM5401_B0 &&
+			    !(bmsr & BMSR_LSTATUS) &&
+			    tp->link_config.active_speed == SPEED_1000) {
+				err = tg3_phy_reset(tp);
+				if (!err)
+					err = tg3_init_5401phy_dsp(tp);
+				if (err)
+					return err;
+			}
+		}
+	} else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+		   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
+		/* 5701 {A0,B0} CRC bug workaround */
+		tg3_writephy(tp, 0x15, 0x0a75);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
+		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
+	}
+
+	/* Clear pending interrupts... */
+	tg3_readphy(tp, MII_TG3_ISTAT, &val);
+	tg3_readphy(tp, MII_TG3_ISTAT, &val);
+
+	if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
+		tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
+	else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
+		tg3_writephy(tp, MII_TG3_IMASK, ~0);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+		if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+		else
+			tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
+	}
+
+	current_link_up = 0;
+	current_speed = SPEED_INVALID;
+	current_duplex = DUPLEX_INVALID;
+
+	if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
+		err = tg3_phy_auxctl_read(tp,
+					  MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+					  &val);
+		if (!err && !(val & (1 << 10))) {
+			tg3_phy_auxctl_write(tp,
+					     MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
+					     val | (1 << 10));
+			goto relink;
+		}
+	}
+
+	bmsr = 0;
+	for (i = 0; i < 100; i++) {
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+		    (bmsr & BMSR_LSTATUS))
+			break;
+		udelay(40);
+	}
+
+	if (bmsr & BMSR_LSTATUS) {
+		u32 aux_stat, bmcr;
+
+		tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
+		for (i = 0; i < 2000; i++) {
+			udelay(10);
+			if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
+			    aux_stat)
+				break;
+		}
+
+		tg3_aux_stat_to_speed_duplex(tp, aux_stat,
+					     &current_speed,
+					     &current_duplex);
+
+		bmcr = 0;
+		for (i = 0; i < 200; i++) {
+			tg3_readphy(tp, MII_BMCR, &bmcr);
+			if (tg3_readphy(tp, MII_BMCR, &bmcr))
+				continue;
+			if (bmcr && bmcr != 0x7fff)
+				break;
+			udelay(10);
+		}
+
+		lcl_adv = 0;
+		rmt_adv = 0;
+
+		tp->link_config.active_speed = current_speed;
+		tp->link_config.active_duplex = current_duplex;
+
+		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+			if ((bmcr & BMCR_ANENABLE) &&
+			    tg3_copper_is_advertising_all(tp,
+						tp->link_config.advertising)) {
+				if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
+								  &rmt_adv))
+					current_link_up = 1;
+			}
+		} else {
+			if (!(bmcr & BMCR_ANENABLE) &&
+			    tp->link_config.speed == current_speed &&
+			    tp->link_config.duplex == current_duplex &&
+			    tp->link_config.flowctrl ==
+			    tp->link_config.active_flowctrl) {
+				current_link_up = 1;
+			}
+		}
+
+		if (current_link_up == 1 &&
+		    tp->link_config.active_duplex == DUPLEX_FULL)
+			tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+	}
+
+relink:
+	if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
+		tg3_phy_copper_begin(tp);
+
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			current_link_up = 1;
+	}
+
+	tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
+	if (current_link_up == 1) {
+		if (tp->link_config.active_speed == SPEED_100 ||
+		    tp->link_config.active_speed == SPEED_10)
+			tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else
+			tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
+		tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
+	else
+		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+	if (tp->link_config.active_duplex == DUPLEX_HALF)
+		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+		if (current_link_up == 1 &&
+		    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
+			tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+		else
+			tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
+	}
+
+	/* ??? Without this setting Netgear GA302T PHY does not
+	 * ??? send/receive packets...
+	 */
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
+	    tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
+		tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
+		tw32_f(MAC_MI_MODE, tp->mi_mode);
+		udelay(80);
+	}
+
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tg3_phy_eee_adjust(tp, current_link_up);
+
+	if (tg3_flag(tp, USE_LINKCHG_REG)) {
+		/* Polled via timer. */
+		tw32_f(MAC_EVENT, 0);
+	} else {
+		tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+	}
+	udelay(40);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
+	    current_link_up == 1 &&
+	    tp->link_config.active_speed == SPEED_1000 &&
+	    (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
+		udelay(120);
+		tw32_f(MAC_STATUS,
+		     (MAC_STATUS_SYNC_CHANGED |
+		      MAC_STATUS_CFG_CHANGED));
+		udelay(40);
+		tg3_write_mem(tp,
+			      NIC_SRAM_FIRMWARE_MBOX,
+			      NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
+	}
+
+	/* Prevent send BD corruption. */
+	if (tg3_flag(tp, CLKREQ_BUG)) {
+		u16 oldlnkctl, newlnkctl;
+
+		pci_read_config_word(tp->pdev,
+				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+				     &oldlnkctl);
+		if (tp->link_config.active_speed == SPEED_100 ||
+		    tp->link_config.active_speed == SPEED_10)
+			newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
+		else
+			newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
+		if (newlnkctl != oldlnkctl)
+			pci_write_config_word(tp->pdev,
+					      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+					      newlnkctl);
+	}
+
+	if (current_link_up != netif_carrier_ok(tp->dev)) {
+		if (current_link_up)
+			netif_carrier_on(tp->dev);
+		else
+			netif_carrier_off(tp->dev);
+		tg3_link_report(tp);
+	}
+
+	return 0;
+}
+
+struct tg3_fiber_aneginfo {
+	int state;
+#define ANEG_STATE_UNKNOWN		0
+#define ANEG_STATE_AN_ENABLE		1
+#define ANEG_STATE_RESTART_INIT		2
+#define ANEG_STATE_RESTART		3
+#define ANEG_STATE_DISABLE_LINK_OK	4
+#define ANEG_STATE_ABILITY_DETECT_INIT	5
+#define ANEG_STATE_ABILITY_DETECT	6
+#define ANEG_STATE_ACK_DETECT_INIT	7
+#define ANEG_STATE_ACK_DETECT		8
+#define ANEG_STATE_COMPLETE_ACK_INIT	9
+#define ANEG_STATE_COMPLETE_ACK		10
+#define ANEG_STATE_IDLE_DETECT_INIT	11
+#define ANEG_STATE_IDLE_DETECT		12
+#define ANEG_STATE_LINK_OK		13
+#define ANEG_STATE_NEXT_PAGE_WAIT_INIT	14
+#define ANEG_STATE_NEXT_PAGE_WAIT	15
+
+	u32 flags;
+#define MR_AN_ENABLE		0x00000001
+#define MR_RESTART_AN		0x00000002
+#define MR_AN_COMPLETE		0x00000004
+#define MR_PAGE_RX		0x00000008
+#define MR_NP_LOADED		0x00000010
+#define MR_TOGGLE_TX		0x00000020
+#define MR_LP_ADV_FULL_DUPLEX	0x00000040
+#define MR_LP_ADV_HALF_DUPLEX	0x00000080
+#define MR_LP_ADV_SYM_PAUSE	0x00000100
+#define MR_LP_ADV_ASYM_PAUSE	0x00000200
+#define MR_LP_ADV_REMOTE_FAULT1	0x00000400
+#define MR_LP_ADV_REMOTE_FAULT2	0x00000800
+#define MR_LP_ADV_NEXT_PAGE	0x00001000
+#define MR_TOGGLE_RX		0x00002000
+#define MR_NP_RX		0x00004000
+
+#define MR_LINK_OK		0x80000000
+
+	unsigned long link_time, cur_time;
+
+	u32 ability_match_cfg;
+	int ability_match_count;
+
+	char ability_match, idle_match, ack_match;
+
+	u32 txconfig, rxconfig;
+#define ANEG_CFG_NP		0x00000080
+#define ANEG_CFG_ACK		0x00000040
+#define ANEG_CFG_RF2		0x00000020
+#define ANEG_CFG_RF1		0x00000010
+#define ANEG_CFG_PS2		0x00000001
+#define ANEG_CFG_PS1		0x00008000
+#define ANEG_CFG_HD		0x00004000
+#define ANEG_CFG_FD		0x00002000
+#define ANEG_CFG_INVAL		0x00001f06
+
+};
+#define ANEG_OK		0
+#define ANEG_DONE	1
+#define ANEG_TIMER_ENAB	2
+#define ANEG_FAILED	-1
+
+#define ANEG_STATE_SETTLE_TIME	10000
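+
+/* The state machine below implements IEEE 802.3 clause 37 1000BASE-X
+ * autonegotiation arbitration in software.  ap->cur_time advances once
+ * per invocation, and fiber_autoneg() invokes it roughly once per
+ * microsecond, so ANEG_STATE_SETTLE_TIME corresponds to about 10 ms.
+ */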
+
+static int tg3_fiber_aneg_smachine(struct tg3 *tp,
+				   struct tg3_fiber_aneginfo *ap)
+{
+	u16 flowctrl;
+	unsigned long delta;
+	u32 rx_cfg_reg;
+	int ret;
+
+	if (ap->state == ANEG_STATE_UNKNOWN) {
+		ap->rxconfig = 0;
+		ap->link_time = 0;
+		ap->cur_time = 0;
+		ap->ability_match_cfg = 0;
+		ap->ability_match_count = 0;
+		ap->ability_match = 0;
+		ap->idle_match = 0;
+		ap->ack_match = 0;
+	}
+	ap->cur_time++;
+
+	if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
+		rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
+
+		if (rx_cfg_reg != ap->ability_match_cfg) {
+			ap->ability_match_cfg = rx_cfg_reg;
+			ap->ability_match = 0;
+			ap->ability_match_count = 0;
+		} else {
+			if (++ap->ability_match_count > 1) {
+				ap->ability_match = 1;
+				ap->ability_match_cfg = rx_cfg_reg;
+			}
+		}
+		if (rx_cfg_reg & ANEG_CFG_ACK)
+			ap->ack_match = 1;
+		else
+			ap->ack_match = 0;
+
+		ap->idle_match = 0;
+	} else {
+		ap->idle_match = 1;
+		ap->ability_match_cfg = 0;
+		ap->ability_match_count = 0;
+		ap->ability_match = 0;
+		ap->ack_match = 0;
+
+		rx_cfg_reg = 0;
+	}
+
+	ap->rxconfig = rx_cfg_reg;
+	ret = ANEG_OK;
+
+	switch (ap->state) {
+	case ANEG_STATE_UNKNOWN:
+		if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
+			ap->state = ANEG_STATE_AN_ENABLE;
+
+		/* fallthru */
+	case ANEG_STATE_AN_ENABLE:
+		ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
+		if (ap->flags & MR_AN_ENABLE) {
+			ap->link_time = 0;
+			ap->cur_time = 0;
+			ap->ability_match_cfg = 0;
+			ap->ability_match_count = 0;
+			ap->ability_match = 0;
+			ap->idle_match = 0;
+			ap->ack_match = 0;
+
+			ap->state = ANEG_STATE_RESTART_INIT;
+		} else {
+			ap->state = ANEG_STATE_DISABLE_LINK_OK;
+		}
+		break;
+
+	case ANEG_STATE_RESTART_INIT:
+		ap->link_time = ap->cur_time;
+		ap->flags &= ~(MR_NP_LOADED);
+		ap->txconfig = 0;
+		tw32(MAC_TX_AUTO_NEG, 0);
+		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ret = ANEG_TIMER_ENAB;
+		ap->state = ANEG_STATE_RESTART;
+
+		/* fallthru */
+	case ANEG_STATE_RESTART:
+		delta = ap->cur_time - ap->link_time;
+		if (delta > ANEG_STATE_SETTLE_TIME)
+			ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
+		else
+			ret = ANEG_TIMER_ENAB;
+		break;
+
+	case ANEG_STATE_DISABLE_LINK_OK:
+		ret = ANEG_DONE;
+		break;
+
+	case ANEG_STATE_ABILITY_DETECT_INIT:
+		ap->flags &= ~(MR_TOGGLE_TX);
+		ap->txconfig = ANEG_CFG_FD;
+		flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+		if (flowctrl & ADVERTISE_1000XPAUSE)
+			ap->txconfig |= ANEG_CFG_PS1;
+		if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+			ap->txconfig |= ANEG_CFG_PS2;
+		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ap->state = ANEG_STATE_ABILITY_DETECT;
+		break;
+
+	case ANEG_STATE_ABILITY_DETECT:
+		if (ap->ability_match != 0 && ap->rxconfig != 0)
+			ap->state = ANEG_STATE_ACK_DETECT_INIT;
+		break;
+
+	case ANEG_STATE_ACK_DETECT_INIT:
+		ap->txconfig |= ANEG_CFG_ACK;
+		tw32(MAC_TX_AUTO_NEG, ap->txconfig);
+		tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ap->state = ANEG_STATE_ACK_DETECT;
+
+		/* fallthru */
+	case ANEG_STATE_ACK_DETECT:
+		if (ap->ack_match != 0) {
+			if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
+			    (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
+				ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
+			} else {
+				ap->state = ANEG_STATE_AN_ENABLE;
+			}
+		} else if (ap->ability_match != 0 &&
+			   ap->rxconfig == 0) {
+			ap->state = ANEG_STATE_AN_ENABLE;
+		}
+		break;
+
+	case ANEG_STATE_COMPLETE_ACK_INIT:
+		if (ap->rxconfig & ANEG_CFG_INVAL) {
+			ret = ANEG_FAILED;
+			break;
+		}
+		ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
+			       MR_LP_ADV_HALF_DUPLEX |
+			       MR_LP_ADV_SYM_PAUSE |
+			       MR_LP_ADV_ASYM_PAUSE |
+			       MR_LP_ADV_REMOTE_FAULT1 |
+			       MR_LP_ADV_REMOTE_FAULT2 |
+			       MR_LP_ADV_NEXT_PAGE |
+			       MR_TOGGLE_RX |
+			       MR_NP_RX);
+		if (ap->rxconfig & ANEG_CFG_FD)
+			ap->flags |= MR_LP_ADV_FULL_DUPLEX;
+		if (ap->rxconfig & ANEG_CFG_HD)
+			ap->flags |= MR_LP_ADV_HALF_DUPLEX;
+		if (ap->rxconfig & ANEG_CFG_PS1)
+			ap->flags |= MR_LP_ADV_SYM_PAUSE;
+		if (ap->rxconfig & ANEG_CFG_PS2)
+			ap->flags |= MR_LP_ADV_ASYM_PAUSE;
+		if (ap->rxconfig & ANEG_CFG_RF1)
+			ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
+		if (ap->rxconfig & ANEG_CFG_RF2)
+			ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
+		if (ap->rxconfig & ANEG_CFG_NP)
+			ap->flags |= MR_LP_ADV_NEXT_PAGE;
+
+		ap->link_time = ap->cur_time;
+
+		ap->flags ^= (MR_TOGGLE_TX);
+		if (ap->rxconfig & 0x0008)
+			ap->flags |= MR_TOGGLE_RX;
+		if (ap->rxconfig & ANEG_CFG_NP)
+			ap->flags |= MR_NP_RX;
+		ap->flags |= MR_PAGE_RX;
+
+		ap->state = ANEG_STATE_COMPLETE_ACK;
+		ret = ANEG_TIMER_ENAB;
+		break;
+
+	case ANEG_STATE_COMPLETE_ACK:
+		if (ap->ability_match != 0 &&
+		    ap->rxconfig == 0) {
+			ap->state = ANEG_STATE_AN_ENABLE;
+			break;
+		}
+		delta = ap->cur_time - ap->link_time;
+		if (delta > ANEG_STATE_SETTLE_TIME) {
+			if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
+				ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+			} else {
+				if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
+				    !(ap->flags & MR_NP_RX)) {
+					ap->state = ANEG_STATE_IDLE_DETECT_INIT;
+				} else {
+					ret = ANEG_FAILED;
+				}
+			}
+		}
+		break;
+
+	case ANEG_STATE_IDLE_DETECT_INIT:
+		ap->link_time = ap->cur_time;
+		tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+
+		ap->state = ANEG_STATE_IDLE_DETECT;
+		ret = ANEG_TIMER_ENAB;
+		break;
+
+	case ANEG_STATE_IDLE_DETECT:
+		if (ap->ability_match != 0 &&
+		    ap->rxconfig == 0) {
+			ap->state = ANEG_STATE_AN_ENABLE;
+			break;
+		}
+		delta = ap->cur_time - ap->link_time;
+		if (delta > ANEG_STATE_SETTLE_TIME) {
+			/* XXX another gem from the Broadcom driver :( */
+			ap->state = ANEG_STATE_LINK_OK;
+		}
+		break;
+
+	case ANEG_STATE_LINK_OK:
+		ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
+		ret = ANEG_DONE;
+		break;
+
+	case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
+		/* ??? unimplemented */
+		break;
+
+	case ANEG_STATE_NEXT_PAGE_WAIT:
+		/* ??? unimplemented */
+		break;
+
+	default:
+		ret = ANEG_FAILED;
+		break;
+	}
+
+	return ret;
+}
+
+static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
+{
+	int res = 0;
+	struct tg3_fiber_aneginfo aninfo;
+	int status = ANEG_FAILED;
+	unsigned int tick;
+	u32 tmp;
+
+	tw32_f(MAC_TX_AUTO_NEG, 0);
+
+	tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
+	tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
+	udelay(40);
+
+	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
+	udelay(40);
+
+	memset(&aninfo, 0, sizeof(aninfo));
+	aninfo.flags |= MR_AN_ENABLE;
+	aninfo.state = ANEG_STATE_UNKNOWN;
+	aninfo.cur_time = 0;
+	tick = 0;
+	while (++tick < 195000) {
+		status = tg3_fiber_aneg_smachine(tp, &aninfo);
+		if (status == ANEG_DONE || status == ANEG_FAILED)
+			break;
+
+		udelay(1);
+	}
+
+	tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	*txflags = aninfo.txconfig;
+	*rxflags = aninfo.flags;
+
+	if (status == ANEG_DONE &&
+	    (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
+			     MR_LP_ADV_FULL_DUPLEX)))
+		res = 1;
+
+	return res;
+}
+
+static void tg3_init_bcm8002(struct tg3 *tp)
+{
+	u32 mac_status = tr32(MAC_STATUS);
+	int i;
+
+	/* Reset when initializing for the first time or when we have a link. */
+	if (tg3_flag(tp, INIT_COMPLETE) &&
+	    !(mac_status & MAC_STATUS_PCS_SYNCED))
+		return;
+
+	/* Set PLL lock range. */
+	tg3_writephy(tp, 0x16, 0x8007);
+
+	/* SW reset */
+	tg3_writephy(tp, MII_BMCR, BMCR_RESET);
+
+	/* Wait for reset to complete. */
+	/* XXX schedule_timeout() ... */
+	for (i = 0; i < 500; i++)
+		udelay(10);
+
+	/* Config mode; select PMA/Ch 1 regs. */
+	tg3_writephy(tp, 0x10, 0x8411);
+
+	/* Enable auto-lock and comdet, select txclk for tx. */
+	tg3_writephy(tp, 0x11, 0x0a10);
+
+	tg3_writephy(tp, 0x18, 0x00a0);
+	tg3_writephy(tp, 0x16, 0x41ff);
+
+	/* Assert and deassert POR. */
+	tg3_writephy(tp, 0x13, 0x0400);
+	udelay(40);
+	tg3_writephy(tp, 0x13, 0x0000);
+
+	tg3_writephy(tp, 0x11, 0x0a50);
+	udelay(40);
+	tg3_writephy(tp, 0x11, 0x0a10);
+
+	/* Wait for signal to stabilize */
+	/* XXX schedule_timeout() ... */
+	for (i = 0; i < 15000; i++)
+		udelay(10);
+
+	/* Deselect the channel register so we can read the PHYID
+	 * later.
+	 */
+	tg3_writephy(tp, 0x10, 0x8011);
+}
+
+static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
+{
+	u16 flowctrl;
+	u32 sg_dig_ctrl, sg_dig_status;
+	u32 serdes_cfg, expected_sg_dig_ctrl;
+	int workaround, port_a;
+	int current_link_up;
+
+	serdes_cfg = 0;
+	expected_sg_dig_ctrl = 0;
+	workaround = 0;
+	port_a = 1;
+	current_link_up = 0;
+
+	if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
+		workaround = 1;
+		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+			port_a = 0;
+
+		/* Preserve bits 0-11, 13, 14 for signal pre-emphasis
+		 * and bits 20-23 for the voltage regulator.
+		 */
+		serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
+	}
+
+	sg_dig_ctrl = tr32(SG_DIG_CTRL);
+
+	if (tp->link_config.autoneg != AUTONEG_ENABLE) {
+		if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
+			if (workaround) {
+				u32 val = serdes_cfg;
+
+				if (port_a)
+					val |= 0xc010000;
+				else
+					val |= 0x4010000;
+				tw32_f(MAC_SERDES_CFG, val);
+			}
+
+			tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+		}
+		if (mac_status & MAC_STATUS_PCS_SYNCED) {
+			tg3_setup_flow_control(tp, 0, 0);
+			current_link_up = 1;
+		}
+		goto out;
+	}
+
+	/* Want auto-negotiation.  */
+	expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
+
+	flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+	if (flowctrl & ADVERTISE_1000XPAUSE)
+		expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
+	if (flowctrl & ADVERTISE_1000XPSE_ASYM)
+		expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
+
+	if (sg_dig_ctrl != expected_sg_dig_ctrl) {
+		if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
+		    tp->serdes_counter &&
+		    ((mac_status & (MAC_STATUS_PCS_SYNCED |
+				    MAC_STATUS_RCVD_CFG)) ==
+		     MAC_STATUS_PCS_SYNCED)) {
+			tp->serdes_counter--;
+			current_link_up = 1;
+			goto out;
+		}
+restart_autoneg:
+		if (workaround)
+			tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
+		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
+		udelay(5);
+		tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
+
+		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+	} else if (mac_status & (MAC_STATUS_PCS_SYNCED |
+				 MAC_STATUS_SIGNAL_DET)) {
+		sg_dig_status = tr32(SG_DIG_STATUS);
+		mac_status = tr32(MAC_STATUS);
+
+		if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
+		    (mac_status & MAC_STATUS_PCS_SYNCED)) {
+			u32 local_adv = 0, remote_adv = 0;
+
+			if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
+				local_adv |= ADVERTISE_1000XPAUSE;
+			if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
+				local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+			if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
+				remote_adv |= LPA_1000XPAUSE;
+			if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
+				remote_adv |= LPA_1000XPAUSE_ASYM;
+
+			tg3_setup_flow_control(tp, local_adv, remote_adv);
+			current_link_up = 1;
+			tp->serdes_counter = 0;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		} else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
+			if (tp->serdes_counter)
+				tp->serdes_counter--;
+			else {
+				if (workaround) {
+					u32 val = serdes_cfg;
+
+					if (port_a)
+						val |= 0xc010000;
+					else
+						val |= 0x4010000;
+
+					tw32_f(MAC_SERDES_CFG, val);
+				}
+
+				tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
+				udelay(40);
+
+				/* Link parallel detection - link is up
+				 * only if we have PCS_SYNC and are not
+				 * receiving config code words.
+				 */
+				mac_status = tr32(MAC_STATUS);
+				if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
+				    !(mac_status & MAC_STATUS_RCVD_CFG)) {
+					tg3_setup_flow_control(tp, 0, 0);
+					current_link_up = 1;
+					tp->phy_flags |=
+						TG3_PHYFLG_PARALLEL_DETECT;
+					tp->serdes_counter =
+						SERDES_PARALLEL_DET_TIMEOUT;
+				} else
+					goto restart_autoneg;
+			}
+		}
+	} else {
+		tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+	}
+
+out:
+	return current_link_up;
+}
+
+static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
+{
+	int current_link_up = 0;
+
+	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
+		goto out;
+
+	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+		u32 txflags, rxflags;
+		int i;
+
+		if (fiber_autoneg(tp, &txflags, &rxflags)) {
+			u32 local_adv = 0, remote_adv = 0;
+
+			if (txflags & ANEG_CFG_PS1)
+				local_adv |= ADVERTISE_1000XPAUSE;
+			if (txflags & ANEG_CFG_PS2)
+				local_adv |= ADVERTISE_1000XPSE_ASYM;
+
+			if (rxflags & MR_LP_ADV_SYM_PAUSE)
+				remote_adv |= LPA_1000XPAUSE;
+			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
+				remote_adv |= LPA_1000XPAUSE_ASYM;
+
+			tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+			current_link_up = 1;
+		}
+		for (i = 0; i < 30; i++) {
+			udelay(20);
+			tw32_f(MAC_STATUS,
+			       (MAC_STATUS_SYNC_CHANGED |
+				MAC_STATUS_CFG_CHANGED));
+			udelay(40);
+			if ((tr32(MAC_STATUS) &
+			     (MAC_STATUS_SYNC_CHANGED |
+			      MAC_STATUS_CFG_CHANGED)) == 0)
+				break;
+		}
+
+		mac_status = tr32(MAC_STATUS);
+		if (current_link_up == 0 &&
+		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
+		    !(mac_status & MAC_STATUS_RCVD_CFG))
+			current_link_up = 1;
+	} else {
+		tg3_setup_flow_control(tp, 0, 0);
+
+		/* Forcing 1000FD link up. */
+		current_link_up = 1;
+
+		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
+		udelay(40);
+
+		tw32_f(MAC_MODE, tp->mac_mode);
+		udelay(40);
+	}
+
+out:
+	return current_link_up;
+}
+
+static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
+{
+	u32 orig_pause_cfg;
+	u16 orig_active_speed;
+	u8 orig_active_duplex;
+	u32 mac_status;
+	int current_link_up;
+	int i;
+
+	orig_pause_cfg = tp->link_config.active_flowctrl;
+	orig_active_speed = tp->link_config.active_speed;
+	orig_active_duplex = tp->link_config.active_duplex;
+
+	if (!tg3_flag(tp, HW_AUTONEG) &&
+	    netif_carrier_ok(tp->dev) &&
+	    tg3_flag(tp, INIT_COMPLETE)) {
+		mac_status = tr32(MAC_STATUS);
+		mac_status &= (MAC_STATUS_PCS_SYNCED |
+			       MAC_STATUS_SIGNAL_DET |
+			       MAC_STATUS_CFG_CHANGED |
+			       MAC_STATUS_RCVD_CFG);
+		if (mac_status == (MAC_STATUS_PCS_SYNCED |
+				   MAC_STATUS_SIGNAL_DET)) {
+			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+					    MAC_STATUS_CFG_CHANGED));
+			return 0;
+		}
+	}
+
+	tw32_f(MAC_TX_AUTO_NEG, 0);
+
+	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	if (tp->phy_id == TG3_PHY_ID_BCM8002)
+		tg3_init_bcm8002(tp);
+
+	/* Enable link change event even when serdes polling.  */
+	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+	udelay(40);
+
+	current_link_up = 0;
+	mac_status = tr32(MAC_STATUS);
+
+	if (tg3_flag(tp, HW_AUTONEG))
+		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
+	else
+		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
+
+	tp->napi[0].hw_status->status =
+		(SD_STATUS_UPDATED |
+		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
+
+	for (i = 0; i < 100; i++) {
+		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
+				    MAC_STATUS_CFG_CHANGED));
+		udelay(5);
+		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
+					 MAC_STATUS_CFG_CHANGED |
+					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
+			break;
+	}
+
+	mac_status = tr32(MAC_STATUS);
+	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
+		current_link_up = 0;
+		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
+		    tp->serdes_counter == 0) {
+			tw32_f(MAC_MODE, (tp->mac_mode |
+					  MAC_MODE_SEND_CONFIGS));
+			udelay(1);
+			tw32_f(MAC_MODE, tp->mac_mode);
+		}
+	}
+
+	if (current_link_up == 1) {
+		tp->link_config.active_speed = SPEED_1000;
+		tp->link_config.active_duplex = DUPLEX_FULL;
+		tw32(MAC_LED_CTRL, (tp->led_ctrl |
+				    LED_CTRL_LNKLED_OVERRIDE |
+				    LED_CTRL_1000MBPS_ON));
+	} else {
+		tp->link_config.active_speed = SPEED_INVALID;
+		tp->link_config.active_duplex = DUPLEX_INVALID;
+		tw32(MAC_LED_CTRL, (tp->led_ctrl |
+				    LED_CTRL_LNKLED_OVERRIDE |
+				    LED_CTRL_TRAFFIC_OVERRIDE));
+	}
+
+	if (current_link_up != netif_carrier_ok(tp->dev)) {
+		if (current_link_up)
+			netif_carrier_on(tp->dev);
+		else
+			netif_carrier_off(tp->dev);
+		tg3_link_report(tp);
+	} else {
+		u32 now_pause_cfg = tp->link_config.active_flowctrl;
+		if (orig_pause_cfg != now_pause_cfg ||
+		    orig_active_speed != tp->link_config.active_speed ||
+		    orig_active_duplex != tp->link_config.active_duplex)
+			tg3_link_report(tp);
+	}
+
+	return 0;
+}
+
+static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
+{
+	int current_link_up, err = 0;
+	u32 bmsr, bmcr;
+	u16 current_speed;
+	u8 current_duplex;
+	u32 local_adv, remote_adv;
+
+	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tw32(MAC_EVENT, 0);
+
+	tw32_f(MAC_STATUS,
+	     (MAC_STATUS_SYNC_CHANGED |
+	      MAC_STATUS_CFG_CHANGED |
+	      MAC_STATUS_MI_COMPLETION |
+	      MAC_STATUS_LNKSTATE_CHANGED));
+	udelay(40);
+
+	if (force_reset)
+		tg3_phy_reset(tp);
+
+	current_link_up = 0;
+	current_speed = SPEED_INVALID;
+	current_duplex = DUPLEX_INVALID;
+
+	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+			bmsr |= BMSR_LSTATUS;
+		else
+			bmsr &= ~BMSR_LSTATUS;
+	}
+
+	err |= tg3_readphy(tp, MII_BMCR, &bmcr);
+
+	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
+	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+		/* do nothing, just check for link up at the end */
+	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
+		u32 adv, new_adv;
+
+		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+				  ADVERTISE_1000XPAUSE |
+				  ADVERTISE_1000XPSE_ASYM |
+				  ADVERTISE_SLCT);
+
+		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+
+		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
+			new_adv |= ADVERTISE_1000XHALF;
+		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
+			new_adv |= ADVERTISE_1000XFULL;
+
+		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
+			tg3_writephy(tp, MII_ADVERTISE, new_adv);
+			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
+			tg3_writephy(tp, MII_BMCR, bmcr);
+
+			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+
+			return err;
+		}
+	} else {
+		u32 new_bmcr;
+
+		bmcr &= ~BMCR_SPEED1000;
+		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
+
+		if (tp->link_config.duplex == DUPLEX_FULL)
+			new_bmcr |= BMCR_FULLDPLX;
+
+		if (new_bmcr != bmcr) {
+			/* BMCR_SPEED1000 is a reserved bit that needs
+			 * to be set on write.
+			 */
+			new_bmcr |= BMCR_SPEED1000;
+
+			/* Force a linkdown */
+			if (netif_carrier_ok(tp->dev)) {
+				u32 adv;
+
+				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
+				adv &= ~(ADVERTISE_1000XFULL |
+					 ADVERTISE_1000XHALF |
+					 ADVERTISE_SLCT);
+				tg3_writephy(tp, MII_ADVERTISE, adv);
+				tg3_writephy(tp, MII_BMCR, bmcr |
+							   BMCR_ANRESTART |
+							   BMCR_ANENABLE);
+				udelay(10);
+				netif_carrier_off(tp->dev);
+			}
+			tg3_writephy(tp, MII_BMCR, new_bmcr);
+			bmcr = new_bmcr;
+			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5714) {
+				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+					bmsr |= BMSR_LSTATUS;
+				else
+					bmsr &= ~BMSR_LSTATUS;
+			}
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		}
+	}
+
+	if (bmsr & BMSR_LSTATUS) {
+		current_speed = SPEED_1000;
+		current_link_up = 1;
+		if (bmcr & BMCR_FULLDPLX)
+			current_duplex = DUPLEX_FULL;
+		else
+			current_duplex = DUPLEX_HALF;
+
+		local_adv = 0;
+		remote_adv = 0;
+
+		if (bmcr & BMCR_ANENABLE) {
+			u32 common;
+
+			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
+			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
+			common = local_adv & remote_adv;
+			if (common & (ADVERTISE_1000XHALF |
+				      ADVERTISE_1000XFULL)) {
+				if (common & ADVERTISE_1000XFULL)
+					current_duplex = DUPLEX_FULL;
+				else
+					current_duplex = DUPLEX_HALF;
+			} else if (!tg3_flag(tp, 5780_CLASS)) {
+				/* Link is up via parallel detect */
+			} else {
+				current_link_up = 0;
+			}
+		}
+	}
+
+	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
+		tg3_setup_flow_control(tp, local_adv, remote_adv);
+
+	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+	if (tp->link_config.active_duplex == DUPLEX_HALF)
+		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
+
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
+
+	tp->link_config.active_speed = current_speed;
+	tp->link_config.active_duplex = current_duplex;
+
+	if (current_link_up != netif_carrier_ok(tp->dev)) {
+		if (current_link_up)
+			netif_carrier_on(tp->dev);
+		else {
+			netif_carrier_off(tp->dev);
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		}
+		tg3_link_report(tp);
+	}
+	return err;
+}
+
+static void tg3_serdes_parallel_detect(struct tg3 *tp)
+{
+	if (tp->serdes_counter) {
+		/* Give autoneg time to complete. */
+		tp->serdes_counter--;
+		return;
+	}
+
+	if (!netif_carrier_ok(tp->dev) &&
+	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
+		u32 bmcr;
+
+		tg3_readphy(tp, MII_BMCR, &bmcr);
+		if (bmcr & BMCR_ANENABLE) {
+			u32 phy1, phy2;
+
+			/* Select shadow register 0x1f */
+			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
+			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
+
+			/* Select expansion interrupt status register */
+			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+					 MII_TG3_DSP_EXP1_INT_STAT);
+			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+
+			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
+				/* We have signal detect and not receiving
+				 * config code words, link is up by parallel
+				 * detection.
+				 */
+
+				bmcr &= ~BMCR_ANENABLE;
+				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+				tg3_writephy(tp, MII_BMCR, bmcr);
+				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
+			}
+		}
+	} else if (netif_carrier_ok(tp->dev) &&
+		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
+		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
+		u32 phy2;
+
+		/* Select expansion interrupt status register */
+		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
+				 MII_TG3_DSP_EXP1_INT_STAT);
+		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
+		if (phy2 & 0x20) {
+			u32 bmcr;
+
+			/* Config code words received, turn on autoneg. */
+			tg3_readphy(tp, MII_BMCR, &bmcr);
+			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
+
+			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		}
+	}
+}
+
+static int tg3_setup_phy(struct tg3 *tp, int force_reset)
+{
+	u32 val;
+	int err;
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+		err = tg3_setup_fiber_phy(tp, force_reset);
+	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+		err = tg3_setup_fiber_mii_phy(tp, force_reset);
+	else
+		err = tg3_setup_copper_phy(tp, force_reset);
+
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+		u32 scale;
+
+		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
+		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
+			scale = 65;
+		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
+			scale = 6;
+		else
+			scale = 12;
+
+		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
+		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
+		tw32(GRC_MISC_CFG, val);
+	}
+
+	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+	      (6 << TX_LENGTHS_IPG_SHIFT);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+		val |= tr32(MAC_TX_LENGTHS) &
+		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
+			TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+	if (tp->link_config.active_speed == SPEED_1000 &&
+	    tp->link_config.active_duplex == DUPLEX_HALF)
+		tw32(MAC_TX_LENGTHS, val |
+		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
+	else
+		tw32(MAC_TX_LENGTHS, val |
+		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		if (netif_carrier_ok(tp->dev)) {
+			tw32(HOSTCC_STAT_COAL_TICKS,
+			     tp->coal.stats_block_coalesce_usecs);
+		} else {
+			tw32(HOSTCC_STAT_COAL_TICKS, 0);
+		}
+	}
+
+	if (tg3_flag(tp, ASPM_WORKAROUND)) {
+		val = tr32(PCIE_PWR_MGMT_THRESH);
+		if (!netif_carrier_ok(tp->dev))
+			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
+			      tp->pwrmgmt_thresh;
+		else
+			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
+		tw32(PCIE_PWR_MGMT_THRESH, val);
+	}
+
+	return err;
+}
+
+static inline int tg3_irq_sync(struct tg3 *tp)
+{
+	return tp->irq_sync;
+}
+
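+/* Read 'len' bytes of registers starting at 'off' into the dump
+ * buffer, storing each value at the index of its register offset
+ * (the 'dst' base is rebased by 'off' so the buffer layout mirrors
+ * the register map).
+ */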
+static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
+{
+	int i;
+
+	dst = (u32 *)((u8 *)dst + off);
+	for (i = 0; i < len; i += sizeof(u32))
+		*dst++ = tr32(off + i);
+}
+
+static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
+{
+	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
+	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
+	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
+	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
+	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
+	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
+	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
+	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
+	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
+	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
+	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
+	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
+	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
+	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
+
+	if (tg3_flag(tp, SUPPORT_MSIX))
+		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
+
+	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
+	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
+	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
+	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
+	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
+	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
+		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
+		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
+	}
+
+	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
+	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
+	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
+	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
+	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
+
+	if (tg3_flag(tp, NVRAM))
+		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
+}
+
+static void tg3_dump_state(struct tg3 *tp)
+{
+	int i;
+	u32 *regs;
+
+	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
+	if (!regs) {
+		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
+		return;
+	}
+
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		/* Read up to but not including private PCI registers */
+		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
+			regs[i / sizeof(u32)] = tr32(i);
+	} else
+		tg3_dump_legacy_regs(tp, regs);
+
+	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
+		if (!regs[i + 0] && !regs[i + 1] &&
+		    !regs[i + 2] && !regs[i + 3])
+			continue;
+
+		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
+			   i * 4,
+			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
+	}
+
+	kfree(regs);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		/* SW status block */
+		netdev_err(tp->dev,
+			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
+			   i,
+			   tnapi->hw_status->status,
+			   tnapi->hw_status->status_tag,
+			   tnapi->hw_status->rx_jumbo_consumer,
+			   tnapi->hw_status->rx_consumer,
+			   tnapi->hw_status->rx_mini_consumer,
+			   tnapi->hw_status->idx[0].rx_producer,
+			   tnapi->hw_status->idx[0].tx_consumer);
+
+		netdev_err(tp->dev,
+		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
+			   i,
+			   tnapi->last_tag, tnapi->last_irq_tag,
+			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
+			   tnapi->rx_rcb_ptr,
+			   tnapi->prodring.rx_std_prod_idx,
+			   tnapi->prodring.rx_std_cons_idx,
+			   tnapi->prodring.rx_jmb_prod_idx,
+			   tnapi->prodring.rx_jmb_cons_idx);
+	}
+}
+
+/* This is called whenever we suspect that the system chipset is re-
+ * ordering the sequence of MMIO to the tx send mailbox. The symptom
+ * is bogus tx completions. We try to recover by setting the
+ * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
+ * in the workqueue.
+ */
+static void tg3_tx_recover(struct tg3 *tp)
+{
+	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
+	       tp->write32_tx_mbox == tg3_write_indirect_mbox);
+
+	netdev_warn(tp->dev,
+		    "The system may be re-ordering memory-mapped I/O "
+		    "cycles to the network device, attempting to recover. "
+		    "Please report the problem to the driver maintainer "
+		    "and include system chipset information.\n");
+
+	spin_lock(&tp->lock);
+	tg3_flag_set(tp, TX_RECOVERY_PENDING);
+	spin_unlock(&tp->lock);
+}
+
+static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
+{
+	/* Tell compiler to fetch tx indices from memory. */
+	barrier();
+	return tnapi->tx_pending -
+	       ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
+}
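+
+/* Example of the wraparound arithmetic (assuming the 512-entry TX
+ * ring this driver uses): with tx_prod == 2 and tx_cons == 509,
+ * (2 - 509) & 511 == 5 descriptors are in flight, so tg3_tx_avail()
+ * returns tx_pending - 5.
+ */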
+
+/* Tigon3 never reports partial packet sends.  So we do not
+ * need special logic to handle SKBs that have not had all
+ * of their frags sent yet, like SunGEM does.
+ */
+static void tg3_tx(struct tg3_napi *tnapi)
+{
+	struct tg3 *tp = tnapi->tp;
+	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
+	u32 sw_idx = tnapi->tx_cons;
+	struct netdev_queue *txq;
+	int index = tnapi - tp->napi;
+
+	if (tg3_flag(tp, ENABLE_TSS))
+		index--;
+
+	txq = netdev_get_tx_queue(tp->dev, index);
+
+	while (sw_idx != hw_idx) {
+		struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
+		struct sk_buff *skb = ri->skb;
+		int i, tx_bug = 0;
+
+		if (unlikely(skb == NULL)) {
+			tg3_tx_recover(tp);
+			return;
+		}
+
+		pci_unmap_single(tp->pdev,
+				 dma_unmap_addr(ri, mapping),
+				 skb_headlen(skb),
+				 PCI_DMA_TODEVICE);
+
+		ri->skb = NULL;
+
+		while (ri->fragmented) {
+			ri->fragmented = false;
+			sw_idx = NEXT_TX(sw_idx);
+			ri = &tnapi->tx_buffers[sw_idx];
+		}
+
+		sw_idx = NEXT_TX(sw_idx);
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			ri = &tnapi->tx_buffers[sw_idx];
+			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
+				tx_bug = 1;
+
+			pci_unmap_page(tp->pdev,
+				       dma_unmap_addr(ri, mapping),
+				       skb_shinfo(skb)->frags[i].size,
+				       PCI_DMA_TODEVICE);
+
+			while (ri->fragmented) {
+				ri->fragmented = false;
+				sw_idx = NEXT_TX(sw_idx);
+				ri = &tnapi->tx_buffers[sw_idx];
+			}
+
+			sw_idx = NEXT_TX(sw_idx);
+		}
+
+		dev_kfree_skb(skb);
+
+		if (unlikely(tx_bug)) {
+			tg3_tx_recover(tp);
+			return;
+		}
+	}
+
+	tnapi->tx_cons = sw_idx;
+
+	/* Need to make the tx_cons update visible to tg3_start_xmit()
+	 * before checking for netif_queue_stopped().  Without the
+	 * memory barrier, there is a small possibility that tg3_start_xmit()
+	 * will miss it and cause the queue to be stopped forever.
+	 */
+	smp_mb();
+
+	if (unlikely(netif_tx_queue_stopped(txq) &&
+		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
+		__netif_tx_lock(txq, smp_processor_id());
+		if (netif_tx_queue_stopped(txq) &&
+		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
+			netif_tx_wake_queue(txq);
+		__netif_tx_unlock(txq);
+	}
+}
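+
+/* The lock-and-recheck above pairs with the smp_mb() in tg3_start_xmit():
+ * whichever of the queue-stop (xmit) and queue-wake (here) paths runs
+ * second is guaranteed to see the other side's index update, so the
+ * queue cannot be left stopped while descriptors are free.
+ */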
+
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+	if (!ri->skb)
+		return;
+
+	pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
+			 map_sz, PCI_DMA_FROMDEVICE);
+	dev_kfree_skb_any(ri->skb);
+	ri->skb = NULL;
+}
+
+/* Returns size of skb allocated or < 0 on error.
+ *
+ * We only need to fill in the address because the other members
+ * of the RX descriptor are invariant, see tg3_init_rings.
+ *
+ * Note the purposeful asymmetry of cpu vs. chip accesses.  For
+ * posting buffers we only dirty the first cache line of the RX
+ * descriptor (containing the address).  Whereas for the RX status
+ * buffers the cpu only reads the last cacheline of the RX descriptor
+ * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
+ */
+static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+			    u32 opaque_key, u32 dest_idx_unmasked)
+{
+	struct tg3_rx_buffer_desc *desc;
+	struct ring_info *map;
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int skb_size, dest_idx;
+
+	switch (opaque_key) {
+	case RXD_OPAQUE_RING_STD:
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+		desc = &tpr->rx_std[dest_idx];
+		map = &tpr->rx_std_buffers[dest_idx];
+		skb_size = tp->rx_pkt_map_sz;
+		break;
+
+	case RXD_OPAQUE_RING_JUMBO:
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+		desc = &tpr->rx_jmb[dest_idx].std;
+		map = &tpr->rx_jmb_buffers[dest_idx];
+		skb_size = TG3_RX_JMB_MAP_SZ;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* Do not overwrite any of the map or rp information
+	 * until we are sure we can commit to a new buffer.
+	 *
+	 * Callers depend upon this behavior and assume that
+	 * we leave everything unchanged if we fail.
+	 */
+	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
+	if (skb == NULL)
+		return -ENOMEM;
+
+	skb_reserve(skb, tp->rx_offset);
+
+	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+				 PCI_DMA_FROMDEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	map->skb = skb;
+	dma_unmap_addr_set(map, mapping, mapping);
+
+	desc->addr_hi = ((u64)mapping >> 32);
+	desc->addr_lo = ((u64)mapping & 0xffffffff);
+
+	return skb_size;
+}
+
+/* We only need to move over in the address because the other
+ * members of the RX descriptor are invariant.  See notes above
+ * tg3_alloc_rx_skb for full details.
+ */
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+			   struct tg3_rx_prodring_set *dpr,
+			   u32 opaque_key, int src_idx,
+			   u32 dest_idx_unmasked)
+{
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
+	struct ring_info *src_map, *dest_map;
+	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
+	int dest_idx;
+
+	switch (opaque_key) {
+	case RXD_OPAQUE_RING_STD:
+		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
+		dest_desc = &dpr->rx_std[dest_idx];
+		dest_map = &dpr->rx_std_buffers[dest_idx];
+		src_desc = &spr->rx_std[src_idx];
+		src_map = &spr->rx_std_buffers[src_idx];
+		break;
+
+	case RXD_OPAQUE_RING_JUMBO:
+		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
+		dest_desc = &dpr->rx_jmb[dest_idx].std;
+		dest_map = &dpr->rx_jmb_buffers[dest_idx];
+		src_desc = &spr->rx_jmb[src_idx].std;
+		src_map = &spr->rx_jmb_buffers[src_idx];
+		break;
+
+	default:
+		return;
+	}
+
+	dest_map->skb = src_map->skb;
+	dma_unmap_addr_set(dest_map, mapping,
+			   dma_unmap_addr(src_map, mapping));
+	dest_desc->addr_hi = src_desc->addr_hi;
+	dest_desc->addr_lo = src_desc->addr_lo;
+
+	/* Ensure that the update to the skb happens after the physical
+	 * addresses have been transferred to the new BD location.
+	 */
+	smp_wmb();
+
+	src_map->skb = NULL;
+}
+
+/* The RX ring scheme is composed of multiple rings which post fresh
+ * buffers to the chip, and one special ring the chip uses to report
+ * status back to the host.
+ *
+ * The special ring reports the status of received packets to the
+ * host.  The chip does not write into the original descriptor the
+ * RX buffer was obtained from.  The chip simply takes the original
+ * descriptor as provided by the host, updates the status and length
+ * field, then writes this into the next status ring entry.
+ *
+ * Each ring the host uses to post buffers to the chip is described
+ * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
+ * it is first placed into the on-chip ram.  When the packet's length
+ * is known, it walks down the TG3_BDINFO entries to select the ring.
+ * Each TG3_BDINFO specifies a MAXLEN field; the first TG3_BDINFO
+ * whose MAXLEN covers the new packet's length is chosen.
+ *
+ * The "separate ring for rx status" scheme may sound queer, but it makes
+ * sense from a cache coherency perspective.  If only the host writes
+ * to the buffer post rings, and only the chip writes to the rx status
+ * rings, then cache lines never move beyond shared-modified state.
+ * If both the host and chip were to write into the same ring, cache line
+ * eviction could occur since both entities want it in an exclusive state.
+ */
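+/* For example, with a standard ring MAXLEN of 1536 and a jumbo ring
+ * MAXLEN of about 9K, a 1200-byte frame is steered to the standard
+ * ring while a 4000-byte frame falls through to the jumbo ring.
+ */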
+static int tg3_rx(struct tg3_napi *tnapi, int budget)
+{
+	struct tg3 *tp = tnapi->tp;
+	u32 work_mask, rx_std_posted = 0;
+	u32 std_prod_idx, jmb_prod_idx;
+	u32 sw_idx = tnapi->rx_rcb_ptr;
+	u16 hw_idx;
+	int received;
+	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
+
+	hw_idx = *(tnapi->rx_rcb_prod_idx);
+	/*
+	 * We need to order the read of hw_idx and the read of
+	 * the opaque cookie.
+	 */
+	rmb();
+	work_mask = 0;
+	received = 0;
+	std_prod_idx = tpr->rx_std_prod_idx;
+	jmb_prod_idx = tpr->rx_jmb_prod_idx;
+	while (sw_idx != hw_idx && budget > 0) {
+		struct ring_info *ri;
+		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
+		unsigned int len;
+		struct sk_buff *skb;
+		dma_addr_t dma_addr;
+		u32 opaque_key, desc_idx, *post_ptr;
+
+		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+		if (opaque_key == RXD_OPAQUE_RING_STD) {
+			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
+			dma_addr = dma_unmap_addr(ri, mapping);
+			skb = ri->skb;
+			post_ptr = &std_prod_idx;
+			rx_std_posted++;
+		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
+			dma_addr = dma_unmap_addr(ri, mapping);
+			skb = ri->skb;
+			post_ptr = &jmb_prod_idx;
+		} else
+			goto next_pkt_nopost;
+
+		work_mask |= opaque_key;
+
+		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+		drop_it:
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
+				       desc_idx, *post_ptr);
+		drop_it_no_recycle:
+			/* Other statistics are kept track of by the card. */
+			tp->rx_dropped++;
+			goto next_pkt;
+		}
+
+		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
+		      ETH_FCS_LEN;
+
+		if (len > TG3_RX_COPY_THRESH(tp)) {
+			int skb_size;
+
+			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+						    *post_ptr);
+			if (skb_size < 0)
+				goto drop_it;
+
+			pci_unmap_single(tp->pdev, dma_addr, skb_size,
+					 PCI_DMA_FROMDEVICE);
+
+			/* Ensure that the update to the skb happens
+			 * after the usage of the old DMA mapping.
+			 */
+			smp_wmb();
+
+			ri->skb = NULL;
+
+			skb_put(skb, len);
+		} else {
+			struct sk_buff *copy_skb;
+
+			tg3_recycle_rx(tnapi, tpr, opaque_key,
+				       desc_idx, *post_ptr);
+
+			copy_skb = netdev_alloc_skb(tp->dev, len +
+						    TG3_RAW_IP_ALIGN);
+			if (copy_skb == NULL)
+				goto drop_it_no_recycle;
+
+			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
+			skb_put(copy_skb, len);
+			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+			skb_copy_from_linear_data(skb, copy_skb->data, len);
+			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
+
+			/* We'll reuse the original ring buffer. */
+			skb = copy_skb;
+		}
+
+		if ((tp->dev->features & NETIF_F_RXCSUM) &&
+		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else
+			skb_checksum_none_assert(skb);
+
+		skb->protocol = eth_type_trans(skb, tp->dev);
+
+		if (len > (tp->dev->mtu + ETH_HLEN) &&
+		    skb->protocol != htons(ETH_P_8021Q)) {
+			dev_kfree_skb(skb);
+			goto drop_it_no_recycle;
+		}
+
+		if (desc->type_flags & RXD_FLAG_VLAN &&
+		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
+			__vlan_hwaccel_put_tag(skb,
+					       desc->err_vlan & RXD_VLAN_MASK);
+
+		napi_gro_receive(&tnapi->napi, skb);
+
+		received++;
+		budget--;
+
+next_pkt:
+		(*post_ptr)++;
+
+		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     tpr->rx_std_prod_idx);
+			work_mask &= ~RXD_OPAQUE_RING_STD;
+			rx_std_posted = 0;
+		}
+next_pkt_nopost:
+		sw_idx++;
+		sw_idx &= tp->rx_ret_ring_mask;
+
+		/* Refresh hw_idx to see if there is new work */
+		if (sw_idx == hw_idx) {
+			hw_idx = *(tnapi->rx_rcb_prod_idx);
+			rmb();
+		}
+	}
+
+	/* ACK the status ring. */
+	tnapi->rx_rcb_ptr = sw_idx;
+	tw32_rx_mbox(tnapi->consmbox, sw_idx);
+
+	/* Refill RX ring(s). */
+	if (!tg3_flag(tp, ENABLE_RSS)) {
+		if (work_mask & RXD_OPAQUE_RING_STD) {
+			tpr->rx_std_prod_idx = std_prod_idx &
+					       tp->rx_std_ring_mask;
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     tpr->rx_std_prod_idx);
+		}
+		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+			tpr->rx_jmb_prod_idx = jmb_prod_idx &
+					       tp->rx_jmb_ring_mask;
+			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+				     tpr->rx_jmb_prod_idx);
+		}
+		mmiowb();
+	} else if (work_mask) {
+		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+		 * updated before the producer indices can be updated.
+		 */
+		smp_wmb();
+
+		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
+		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
+
+		if (tnapi != &tp->napi[1])
+			napi_schedule(&tp->napi[1].napi);
+	}
+
+	return received;
+}
+
+static void tg3_poll_link(struct tg3 *tp)
+{
+	/* handle link change and other phy events */
+	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
+		struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
+		if (sblk->status & SD_STATUS_LINK_CHG) {
+			sblk->status = SD_STATUS_UPDATED |
+				       (sblk->status & ~SD_STATUS_LINK_CHG);
+			spin_lock(&tp->lock);
+			if (tg3_flag(tp, USE_PHYLIB)) {
+				tw32_f(MAC_STATUS,
+				     (MAC_STATUS_SYNC_CHANGED |
+				      MAC_STATUS_CFG_CHANGED |
+				      MAC_STATUS_MI_COMPLETION |
+				      MAC_STATUS_LNKSTATE_CHANGED));
+				udelay(40);
+			} else
+				tg3_setup_phy(tp, 0);
+			spin_unlock(&tp->lock);
+		}
+	}
+}
+
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+				struct tg3_rx_prodring_set *dpr,
+				struct tg3_rx_prodring_set *spr)
+{
+	u32 si, di, cpycnt, src_prod_idx;
+	int i, err = 0;
+
+	while (1) {
+		src_prod_idx = spr->rx_std_prod_idx;
+
+		/* Make sure updates to the rx_std_buffers[] entries and the
+		 * standard producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_std_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_std_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+		else
+			cpycnt = tp->rx_std_ring_mask + 1 -
+				 spr->rx_std_cons_idx;
+
+		cpycnt = min(cpycnt,
+			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
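+		/* Example: with a 512-entry ring (mask 511), cons_idx = 500
+		 * and prod_idx = 10 the producer has wrapped, so this pass
+		 * copies the 512 - 500 = 12 tail entries; the next loop
+		 * iteration handles the 10 entries at the start of the ring.
+		 */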
+
+		si = spr->rx_std_cons_idx;
+		di = dpr->rx_std_prod_idx;
+
+		for (i = di; i < di + cpycnt; i++) {
+			if (dpr->rx_std_buffers[i].skb) {
+				cpycnt = i - di;
+				err = -ENOSPC;
+				break;
+			}
+		}
+
+		if (!cpycnt)
+			break;
+
+		/* Ensure that updates to the rx_std_buffers ring and the
+		 * shadowed hardware producer ring from tg3_recycle_skb() are
+		 * ordered correctly WRT the skb check above.
+		 */
+		smp_rmb();
+
+		memcpy(&dpr->rx_std_buffers[di],
+		       &spr->rx_std_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_std[si];
+			dbd = &dpr->rx_std[di];
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
+		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
+				       tp->rx_std_ring_mask;
+	}
+
+	while (1) {
+		src_prod_idx = spr->rx_jmb_prod_idx;
+
+		/* Make sure updates to the rx_jmb_buffers[] entries and
+		 * the jumbo producer index are seen in the correct order.
+		 */
+		smp_rmb();
+
+		if (spr->rx_jmb_cons_idx == src_prod_idx)
+			break;
+
+		if (spr->rx_jmb_cons_idx < src_prod_idx)
+			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+		else
+			cpycnt = tp->rx_jmb_ring_mask + 1 -
+				 spr->rx_jmb_cons_idx;
+
+		cpycnt = min(cpycnt,
+			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
+
+		si = spr->rx_jmb_cons_idx;
+		di = dpr->rx_jmb_prod_idx;
+
+		for (i = di; i < di + cpycnt; i++) {
+			if (dpr->rx_jmb_buffers[i].skb) {
+				cpycnt = i - di;
+				err = -ENOSPC;
+				break;
+			}
+		}
+
+		if (!cpycnt)
+			break;
+
+		/* Ensure that updates to the rx_jmb_buffers ring and the
+		 * shadowed hardware producer ring from tg3_recycle_skb() are
+		 * ordered correctly WRT the skb check above.
+		 */
+		smp_rmb();
+
+		memcpy(&dpr->rx_jmb_buffers[di],
+		       &spr->rx_jmb_buffers[si],
+		       cpycnt * sizeof(struct ring_info));
+
+		for (i = 0; i < cpycnt; i++, di++, si++) {
+			struct tg3_rx_buffer_desc *sbd, *dbd;
+			sbd = &spr->rx_jmb[si].std;
+			dbd = &dpr->rx_jmb[di].std;
+			dbd->addr_hi = sbd->addr_hi;
+			dbd->addr_lo = sbd->addr_lo;
+		}
+
+		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
+		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
+				       tp->rx_jmb_ring_mask;
+	}
+
+	return err;
+}
+
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+	struct tg3 *tp = tnapi->tp;
+
+	/* run TX completion thread */
+	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
+		tg3_tx(tnapi);
+		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+			return work_done;
+	}
+
+	/* run RX thread, within the bounds set by NAPI.
+	 * All RX "locking" is done by ensuring outside
+	 * code synchronizes with tg3->napi.poll()
+	 */
+	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+		work_done += tg3_rx(tnapi, budget - work_done);
+
+	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
+		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
+		int i, err = 0;
+		u32 std_prod_idx = dpr->rx_std_prod_idx;
+		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
+
+		for (i = 1; i < tp->irq_cnt; i++)
+			err |= tg3_rx_prodring_xfer(tp, dpr,
+						    &tp->napi[i].prodring);
+
+		wmb();
+
+		if (std_prod_idx != dpr->rx_std_prod_idx)
+			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+				     dpr->rx_std_prod_idx);
+
+		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+				     dpr->rx_jmb_prod_idx);
+
+		mmiowb();
+
+		if (err)
+			tw32_f(HOSTCC_MODE, tp->coal_now);
+	}
+
+	return work_done;
+}
+
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+	struct tg3 *tp = tnapi->tp;
+	int work_done = 0;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	while (1) {
+		work_done = tg3_poll_work(tnapi, work_done, budget);
+
+		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+			goto tx_recovery;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		/* tp->last_tag is used in tg3_int_reenable() below
+		 * to tell the hw how much work has been processed,
+		 * so we must read it before checking for more work.
+		 */
+		tnapi->last_tag = sblk->status_tag;
+		tnapi->last_irq_tag = tnapi->last_tag;
+		rmb();
+
+		/* check for RX/TX work to do */
+		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
+			napi_complete(napi);
+			/* Reenable interrupts. */
+			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+			mmiowb();
+			break;
+		}
+	}
+
+	return work_done;
+
+tx_recovery:
+	/* work_done is guaranteed to be less than budget. */
+	napi_complete(napi);
+	schedule_work(&tp->reset_task);
+	return work_done;
+}
+
+static void tg3_process_error(struct tg3 *tp)
+{
+	u32 val;
+	bool real_error = false;
+
+	if (tg3_flag(tp, ERROR_PROCESSED))
+		return;
+
+	/* Check Flow Attention register */
+	val = tr32(HOSTCC_FLOW_ATTN);
+	if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
+		netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
+		real_error = true;
+	}
+
+	if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
+		netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
+		real_error = true;
+	}
+
+	if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
+		netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
+		real_error = true;
+	}
+
+	if (!real_error)
+		return;
+
+	tg3_dump_state(tp);
+
+	tg3_flag_set(tp, ERROR_PROCESSED);
+	schedule_work(&tp->reset_task);
+}
+
+static int tg3_poll(struct napi_struct *napi, int budget)
+{
+	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+	struct tg3 *tp = tnapi->tp;
+	int work_done = 0;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	while (1) {
+		if (sblk->status & SD_STATUS_ERROR)
+			tg3_process_error(tp);
+
+		tg3_poll_link(tp);
+
+		work_done = tg3_poll_work(tnapi, work_done, budget);
+
+		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
+			goto tx_recovery;
+
+		if (unlikely(work_done >= budget))
+			break;
+
+		if (tg3_flag(tp, TAGGED_STATUS)) {
+			/* tp->last_tag is used in tg3_int_reenable() below
+			 * to tell the hw how much work has been processed,
+			 * so we must read it before checking for more work.
+			 */
+			tnapi->last_tag = sblk->status_tag;
+			tnapi->last_irq_tag = tnapi->last_tag;
+			rmb();
+		} else
+			sblk->status &= ~SD_STATUS_UPDATED;
+
+		if (likely(!tg3_has_work(tnapi))) {
+			napi_complete(napi);
+			tg3_int_reenable(tnapi);
+			break;
+		}
+	}
+
+	return work_done;
+
+tx_recovery:
+	/* work_done is guaranteed to be less than budget. */
+	napi_complete(napi);
+	schedule_work(&tp->reset_task);
+	return work_done;
+}
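+
+/* Both tg3_poll() and tg3_poll_msix() follow the NAPI contract:
+ * napi_complete() is called and chip interrupts are re-enabled only
+ * when fewer than "budget" packets were processed; otherwise they
+ * return and NAPI polls again.
+ */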
+
+static void tg3_napi_disable(struct tg3 *tp)
+{
+	int i;
+
+	for (i = tp->irq_cnt - 1; i >= 0; i--)
+		napi_disable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_enable(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		napi_enable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_init(struct tg3 *tp)
+{
+	int i;
+
+	netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+	for (i = 1; i < tp->irq_cnt; i++)
+		netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+}
+
+static void tg3_napi_fini(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		netif_napi_del(&tp->napi[i].napi);
+}
+
+static inline void tg3_netif_stop(struct tg3 *tp)
+{
+	tp->dev->trans_start = jiffies;	/* prevent tx timeout */
+	tg3_napi_disable(tp);
+	netif_tx_disable(tp->dev);
+}
+
+static inline void tg3_netif_start(struct tg3 *tp)
+{
+	/* NOTE: unconditional netif_tx_wake_all_queues is only
+	 * appropriate so long as all callers are assured to
+	 * have free tx slots (such as after tg3_init_hw)
+	 */
+	netif_tx_wake_all_queues(tp->dev);
+
+	tg3_napi_enable(tp);
+	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
+	tg3_enable_ints(tp);
+}
+
+static void tg3_irq_quiesce(struct tg3 *tp)
+{
+	int i;
+
+	BUG_ON(tp->irq_sync);
+
+	tp->irq_sync = 1;
+	smp_mb();
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		synchronize_irq(tp->napi[i].irq_vec);
+}
+
+/* Fully shut down all tg3 driver activity elsewhere in the system.
+ * If irq_sync is non-zero, then the IRQ handler must be synchronized
+ * with as well.  Most of the time, this is not necessary except when
+ * shutting down the device.
+ */
+static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
+{
+	spin_lock_bh(&tp->lock);
+	if (irq_sync)
+		tg3_irq_quiesce(tp);
+}
+
+static inline void tg3_full_unlock(struct tg3 *tp)
+{
+	spin_unlock_bh(&tp->lock);
+}
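+
+/* Typical usage, as in tg3_reset_task() below:
+ *
+ *	tg3_full_lock(tp, 1);	- also waits out in-flight IRQ handlers
+ *	... reprogram the chip ...
+ *	tg3_full_unlock(tp);
+ */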
+
+/* One-shot MSI handler - Chip automatically disables interrupt
+ * after sending MSI so driver doesn't have to do it.
+ */
+static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+
+	prefetch(tnapi->hw_status);
+	if (tnapi->rx_rcb)
+		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+	if (likely(!tg3_irq_sync(tp)))
+		napi_schedule(&tnapi->napi);
+
+	return IRQ_HANDLED;
+}
+
+/* MSI ISR - No need to check for interrupt sharing and no need to
+ * flush status block and interrupt mailbox. PCI ordering rules
+ * guarantee that MSI will arrive after the status block.
+ */
+static irqreturn_t tg3_msi(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+
+	prefetch(tnapi->hw_status);
+	if (tnapi->rx_rcb)
+		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+	/*
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
+	 * chip-internal interrupt pending events.
+	 * Writing non-zero to intr-mbox-0 additionally tells the
+	 * NIC to stop sending us irqs, engaging "in-intr-handler"
+	 * event coalescing.
+	 */
+	tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	if (likely(!tg3_irq_sync(tp)))
+		napi_schedule(&tnapi->napi);
+
+	return IRQ_RETVAL(1);
+}
+
+static irqreturn_t tg3_interrupt(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+	unsigned int handled = 1;
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block posted prior to it has reached
+	 * host memory.
+	 * Reading the PCI State register will confirm whether the
+	 * interrupt is ours and will flush the status block.
+	 */
+	if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
+		if (tg3_flag(tp, CHIP_RESETTING) ||
+		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+			handled = 0;
+			goto out;
+		}
+	}
+
+	/*
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
+	 * chip-internal interrupt pending events.
+	 * Writing non-zero to intr-mbox-0 additionally tells the
+	 * NIC to stop sending us irqs, engaging "in-intr-handler"
+	 * event coalescing.
+	 *
+	 * Flush the mailbox to de-assert the IRQ immediately to prevent
+	 * spurious interrupts.  The flush impacts performance but
+	 * excessive spurious interrupts can be worse in some cases.
+	 */
+	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+	if (tg3_irq_sync(tp))
+		goto out;
+	sblk->status &= ~SD_STATUS_UPDATED;
+	if (likely(tg3_has_work(tnapi))) {
+		prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+		napi_schedule(&tnapi->napi);
+	} else {
+		/* No work, shared interrupt perhaps?  re-enable
+		 * interrupts, and flush that PCI write
+		 */
+		tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
+			       0x00000000);
+	}
+out:
+	return IRQ_RETVAL(handled);
+}
+
+static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+	unsigned int handled = 1;
+
+	/* In INTx mode, it is possible for the interrupt to arrive at
+	 * the CPU before the status block posted prior to it has reached
+	 * host memory.
+	 * Reading the PCI State register will confirm whether the
+	 * interrupt is ours and will flush the status block.
+	 */
+	if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
+		if (tg3_flag(tp, CHIP_RESETTING) ||
+		    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+			handled = 0;
+			goto out;
+		}
+	}
+
+	/*
+	 * Writing any value to intr-mbox-0 clears PCI INTA# and
+	 * chip-internal interrupt pending events.
+	 * Writing non-zero to intr-mbox-0 additionally tells the
+	 * NIC to stop sending us irqs, engaging "in-intr-handler"
+	 * event coalescing.
+	 *
+	 * Flush the mailbox to de-assert the IRQ immediately to prevent
+	 * spurious interrupts.  The flush impacts performance but
+	 * excessive spurious interrupts can be worse in some cases.
+	 */
+	tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
+
+	/*
+	 * In a shared interrupt configuration, sometimes other devices'
+	 * interrupts will scream.  We record the current status tag here
+	 * so that the above check can report that the screaming interrupts
+	 * are unhandled.  Eventually they will be silenced.
+	 */
+	tnapi->last_irq_tag = sblk->status_tag;
+
+	if (tg3_irq_sync(tp))
+		goto out;
+
+	prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
+
+	napi_schedule(&tnapi->napi);
+
+out:
+	return IRQ_RETVAL(handled);
+}
+
+/* ISR for interrupt test */
+static irqreturn_t tg3_test_isr(int irq, void *dev_id)
+{
+	struct tg3_napi *tnapi = dev_id;
+	struct tg3 *tp = tnapi->tp;
+	struct tg3_hw_status *sblk = tnapi->hw_status;
+
+	if ((sblk->status & SD_STATUS_UPDATED) ||
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
+		tg3_disable_ints(tp);
+		return IRQ_RETVAL(1);
+	}
+	return IRQ_RETVAL(0);
+}
+
+static int tg3_init_hw(struct tg3 *, int);
+static int tg3_halt(struct tg3 *, int, int);
+
+/* Restart hardware after configuration changes, self-test, etc.
+ * Invoked with tp->lock held.
+ */
+static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
+	__releases(tp->lock)
+	__acquires(tp->lock)
+{
+	int err;
+
+	err = tg3_init_hw(tp, reset_phy);
+	if (err) {
+		netdev_err(tp->dev,
+			   "Failed to re-initialize device, aborting\n");
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		tg3_full_unlock(tp);
+		del_timer_sync(&tp->timer);
+		tp->irq_sync = 0;
+		tg3_napi_enable(tp);
+		dev_close(tp->dev);
+		tg3_full_lock(tp, 0);
+	}
+	return err;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tg3_poll_controller(struct net_device *dev)
+{
+	int i;
+	struct tg3 *tp = netdev_priv(dev);
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
+}
+#endif
+
+static void tg3_reset_task(struct work_struct *work)
+{
+	struct tg3 *tp = container_of(work, struct tg3, reset_task);
+	int err;
+	unsigned int restart_timer;
+
+	tg3_full_lock(tp, 0);
+
+	if (!netif_running(tp->dev)) {
+		tg3_full_unlock(tp);
+		return;
+	}
+
+	tg3_full_unlock(tp);
+
+	tg3_phy_stop(tp);
+
+	tg3_netif_stop(tp);
+
+	tg3_full_lock(tp, 1);
+
+	restart_timer = tg3_flag(tp, RESTART_TIMER);
+	tg3_flag_clear(tp, RESTART_TIMER);
+
+	if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
+		tp->write32_tx_mbox = tg3_write32_tx_mbox;
+		tp->write32_rx_mbox = tg3_write_flush_reg32;
+		tg3_flag_set(tp, MBOX_WRITE_REORDER);
+		tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+	}
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+	err = tg3_init_hw(tp, 1);
+	if (err)
+		goto out;
+
+	tg3_netif_start(tp);
+
+	if (restart_timer)
+		mod_timer(&tp->timer, jiffies + 1);
+
+out:
+	tg3_full_unlock(tp);
+
+	if (!err)
+		tg3_phy_start(tp);
+}
+
+static void tg3_tx_timeout(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (netif_msg_tx_err(tp)) {
+		netdev_err(dev, "transmit timed out, resetting\n");
+		tg3_dump_state(tp);
+	}
+
+	schedule_work(&tp->reset_task);
+}
+
+/* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc. */
+static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
+{
+	u32 base = (u32) mapping & 0xffffffff;
+
+	return (base > 0xffffdcc0) && (base + len + 8 < base);
+}
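+
+/* Example: a buffer mapped at 0xffffff00 with len = 0x200 gives
+ * base + len + 8 = 0x100000108, which truncates to 0x108 < base,
+ * so the 4GB-boundary crossing is detected.
+ */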
+
+/* Test for DMA addresses > 40-bit */
+static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
+					  int len)
+{
+#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
+	if (tg3_flag(tp, 40BIT_DMA_BUG))
+		return ((u64) mapping + len) > DMA_BIT_MASK(40);
+	return 0;
+#else
+	return 0;
+#endif
+}
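+
+/* Example: with the 40-bit DMA bug present, a buffer mapped at
+ * 0xfffffff000 with len = 0x2000 ends at 0x10000001000, which exceeds
+ * DMA_BIT_MASK(40), so the buffer must be bounced through a copy
+ * (see tigon3_dma_hwbug_workaround() below).
+ */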
+
+static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
+				 dma_addr_t mapping, u32 len, u32 flags,
+				 u32 mss, u32 vlan)
+{
+	txbd->addr_hi = ((u64) mapping >> 32);
+	txbd->addr_lo = ((u64) mapping & 0xffffffff);
+	txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
+	txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
+}
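+
+/* Example: a DMA address of 0x123456780 lands in the BD as
+ * addr_hi = 0x00000001 and addr_lo = 0x23456780.
+ */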
+
+static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
+			    dma_addr_t map, u32 len, u32 flags,
+			    u32 mss, u32 vlan)
+{
+	struct tg3 *tp = tnapi->tp;
+	bool hwbug = false;
+
+	if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
+		hwbug = true;
+
+	if (tg3_4g_overflow_test(map, len))
+		hwbug = true;
+
+	if (tg3_40bit_overflow_test(tp, map, len))
+		hwbug = true;
+
+	if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+		u32 tmp_flag = flags & ~TXD_FLAG_END;
+		while (len > TG3_TX_BD_DMA_MAX) {
+			u32 frag_len = TG3_TX_BD_DMA_MAX;
+			len -= TG3_TX_BD_DMA_MAX;
+
+			if (len) {
+				tnapi->tx_buffers[*entry].fragmented = true;
+				/* Avoid the 8-byte DMA problem */
+				if (len <= 8) {
+					len += TG3_TX_BD_DMA_MAX / 2;
+					frag_len = TG3_TX_BD_DMA_MAX / 2;
+				}
+			} else
+				tmp_flag = flags;
+
+			if (*budget) {
+				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+					      frag_len, tmp_flag, mss, vlan);
+				(*budget)--;
+				*entry = NEXT_TX(*entry);
+			} else {
+				hwbug = true;
+				break;
+			}
+
+			map += frag_len;
+		}
+
+		if (len) {
+			if (*budget) {
+				tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+					      len, flags, mss, vlan);
+				(*budget)--;
+				*entry = NEXT_TX(*entry);
+			} else {
+				hwbug = true;
+			}
+		}
+	} else {
+		tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
+			      len, flags, mss, vlan);
+		*entry = NEXT_TX(*entry);
+	}
+
+	return hwbug;
+}
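+
+/* Worked example (assuming TG3_TX_BD_DMA_MAX is 4096): on a
+ * 4K_FIFO_LIMIT chip a 4100-byte fragment would leave a 4-byte
+ * remainder after one full BD; since 4 <= 8 would trip the short-DMA
+ * bug, the split is rebalanced into a 2048-byte BD followed by a
+ * final 2052-byte BD.
+ */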
+
+static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
+{
+	int i;
+	struct sk_buff *skb;
+	struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
+
+	skb = txb->skb;
+	txb->skb = NULL;
+
+	pci_unmap_single(tnapi->tp->pdev,
+			 dma_unmap_addr(txb, mapping),
+			 skb_headlen(skb),
+			 PCI_DMA_TODEVICE);
+
+	while (txb->fragmented) {
+		txb->fragmented = false;
+		entry = NEXT_TX(entry);
+		txb = &tnapi->tx_buffers[entry];
+	}
+
+	for (i = 0; i < last; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		entry = NEXT_TX(entry);
+		txb = &tnapi->tx_buffers[entry];
+
+		pci_unmap_page(tnapi->tp->pdev,
+			       dma_unmap_addr(txb, mapping),
+			       frag->size, PCI_DMA_TODEVICE);
+
+		while (txb->fragmented) {
+			txb->fragmented = false;
+			entry = NEXT_TX(entry);
+			txb = &tnapi->tx_buffers[entry];
+		}
+	}
+}
+
+/* Workaround 4GB and 40-bit hardware DMA bugs. */
+static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
+				       struct sk_buff *skb,
+				       u32 *entry, u32 *budget,
+				       u32 base_flags, u32 mss, u32 vlan)
+{
+	struct tg3 *tp = tnapi->tp;
+	struct sk_buff *new_skb;
+	dma_addr_t new_addr = 0;
+	int ret = 0;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+		new_skb = skb_copy(skb, GFP_ATOMIC);
+	else {
+		int more_headroom = 4 - ((unsigned long)skb->data & 3);
+
+		new_skb = skb_copy_expand(skb,
+					  skb_headroom(skb) + more_headroom,
+					  skb_tailroom(skb), GFP_ATOMIC);
+	}
+
+	if (!new_skb) {
+		ret = -1;
+	} else {
+		/* New SKB is guaranteed to be linear. */
+		new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
+					  PCI_DMA_TODEVICE);
+		/* Make sure the mapping succeeded */
+		if (pci_dma_mapping_error(tp->pdev, new_addr)) {
+			dev_kfree_skb(new_skb);
+			ret = -1;
+		} else {
+			base_flags |= TXD_FLAG_END;
+
+			tnapi->tx_buffers[*entry].skb = new_skb;
+			dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
+					   mapping, new_addr);
+
+			if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
+					    new_skb->len, base_flags,
+					    mss, vlan)) {
+				tg3_tx_skb_unmap(tnapi, *entry, 0);
+				dev_kfree_skb(new_skb);
+				ret = -1;
+			}
+		}
+	}
+
+	dev_kfree_skb(skb);
+
+	return ret;
+}
+
+static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
+
+/* Use GSO to work around a rare TSO bug that may be triggered when the
+ * TSO header is greater than 80 bytes.
+ */
+static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+{
+	struct sk_buff *segs, *nskb;
+	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
+
+	/* Estimate the descriptor usage in the worst case: conservatively
+	 * assume up to three BDs per resulting GSO segment.
+	 */
+	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
+		netif_stop_queue(tp->dev);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+			return NETDEV_TX_BUSY;
+
+		netif_wake_queue(tp->dev);
+	}
+
+	segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
+	if (IS_ERR(segs))
+		goto tg3_tso_bug_end;
+
+	do {
+		nskb = segs;
+		segs = segs->next;
+		nskb->next = NULL;
+		tg3_start_xmit(nskb, tp->dev);
+	} while (segs);
+
+tg3_tso_bug_end:
+	dev_kfree_skb(skb);
+
+	return NETDEV_TX_OK;
+}
+
+/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
+ * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
+ */
+static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 len, entry, base_flags, mss, vlan = 0;
+	u32 budget;
+	int i = -1, would_hit_hwbug;
+	dma_addr_t mapping;
+	struct tg3_napi *tnapi;
+	struct netdev_queue *txq;
+	unsigned int last;
+
+	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+	tnapi = &tp->napi[skb_get_queue_mapping(skb)];
+	if (tg3_flag(tp, ENABLE_TSS))
+		tnapi++;
+
+	budget = tg3_tx_avail(tnapi);
+
+	/* We are running in BH disabled context with netif_tx_lock
+	 * and TX reclaim runs via tp->napi.poll inside of a software
+	 * interrupt.  Furthermore, IRQ processing runs lockless so we have
+	 * no IRQ context deadlocks to worry about either.  Rejoice!
+	 */
+	if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
+		if (!netif_tx_queue_stopped(txq)) {
+			netif_tx_stop_queue(txq);
+
+			/* This is a hard error, log it. */
+			netdev_err(dev,
+				   "BUG! Tx Ring full when queue awake!\n");
+		}
+		return NETDEV_TX_BUSY;
+	}
+
+	entry = tnapi->tx_prod;
+	base_flags = 0;
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
+	mss = skb_shinfo(skb)->gso_size;
+	if (mss) {
+		struct iphdr *iph;
+		u32 tcp_opt_len, hdr_len;
+
+		if (skb_header_cloned(skb) &&
+		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
+			dev_kfree_skb(skb);
+			goto out_unlock;
+		}
+
+		iph = ip_hdr(skb);
+		tcp_opt_len = tcp_optlen(skb);
+
+		if (skb_is_gso_v6(skb)) {
+			hdr_len = skb_headlen(skb) - ETH_HLEN;
+		} else {
+			u32 ip_tcp_len;
+
+			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
+			hdr_len = ip_tcp_len + tcp_opt_len;
+
+			iph->check = 0;
+			iph->tot_len = htons(mss + hdr_len);
+		}
+
+		if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+		    tg3_flag(tp, TSO_BUG))
+			return tg3_tso_bug(tp, skb);
+
+		base_flags |= (TXD_FLAG_CPU_PRE_DMA |
+			       TXD_FLAG_CPU_POST_DMA);
+
+		if (tg3_flag(tp, HW_TSO_1) ||
+		    tg3_flag(tp, HW_TSO_2) ||
+		    tg3_flag(tp, HW_TSO_3)) {
+			tcp_hdr(skb)->check = 0;
+			base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
+		} else
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+
+		if (tg3_flag(tp, HW_TSO_3)) {
+			mss |= (hdr_len & 0xc) << 12;
+			if (hdr_len & 0x10)
+				base_flags |= 0x00000010;
+			base_flags |= (hdr_len & 0x3e0) << 5;
+		} else if (tg3_flag(tp, HW_TSO_2))
+			mss |= hdr_len << 9;
+		else if (tg3_flag(tp, HW_TSO_1) ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+			if (tcp_opt_len || iph->ihl > 5) {
+				int tsflags;
+
+				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+				mss |= (tsflags << 11);
+			}
+		} else {
+			if (tcp_opt_len || iph->ihl > 5) {
+				int tsflags;
+
+				tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
+				base_flags |= tsflags << 12;
+			}
+		}
+	}
+
+#ifdef BCM_KERNEL_SUPPORTS_8021Q
+	if (vlan_tx_tag_present(skb)) {
+		base_flags |= TXD_FLAG_VLAN;
+		vlan = vlan_tx_tag_get(skb);
+	}
+#endif
+
+	if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
+	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
+		base_flags |= TXD_FLAG_JMB_PKT;
+
+	len = skb_headlen(skb);
+
+	mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, mapping)) {
+		dev_kfree_skb(skb);
+		goto out_unlock;
+	}
+
+	tnapi->tx_buffers[entry].skb = skb;
+	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
+
+	would_hit_hwbug = 0;
+
+	if (tg3_flag(tp, 5701_DMA_BUG))
+		would_hit_hwbug = 1;
+
+	if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
+			  ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
+			    mss, vlan))
+		would_hit_hwbug = 1;
+
+	/* Now loop through additional data fragments, and queue them. */
+	if (skb_shinfo(skb)->nr_frags > 0) {
+		u32 tmp_mss = mss;
+
+		if (!tg3_flag(tp, HW_TSO_1) &&
+		    !tg3_flag(tp, HW_TSO_2) &&
+		    !tg3_flag(tp, HW_TSO_3))
+			tmp_mss = 0;
+
+		last = skb_shinfo(skb)->nr_frags - 1;
+		for (i = 0; i <= last; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			len = frag->size;
+			mapping = pci_map_page(tp->pdev,
+					       frag->page,
+					       frag->page_offset,
+					       len, PCI_DMA_TODEVICE);
+
+			tnapi->tx_buffers[entry].skb = NULL;
+			dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
+					   mapping);
+			if (pci_dma_mapping_error(tp->pdev, mapping))
+				goto dma_error;
+
+			if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
+					    len, base_flags |
+					    ((i == last) ? TXD_FLAG_END : 0),
+					    tmp_mss, vlan))
+				would_hit_hwbug = 1;
+		}
+	}
+
+	if (would_hit_hwbug) {
+		tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+
+		/* If the workaround fails due to memory/mapping
+		 * failure, silently drop this packet.
+		 */
+		entry = tnapi->tx_prod;
+		budget = tg3_tx_avail(tnapi);
+		if (tigon3_dma_hwbug_workaround(tnapi, skb, &entry, &budget,
+						base_flags, mss, vlan))
+			goto out_unlock;
+	}
+
+	skb_tx_timestamp(skb);
+
+	/* Packets are ready, update Tx producer idx local and on card. */
+	tw32_tx_mbox(tnapi->prodmbox, entry);
+
+	tnapi->tx_prod = entry;
+	if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
+		netif_tx_stop_queue(txq);
+
+		/* netif_tx_stop_queue() must be done before checking
+		 * tx index in tg3_tx_avail() below, because in
+		 * tg3_tx(), we update tx index before checking for
+		 * netif_tx_queue_stopped().
+		 */
+		smp_mb();
+		if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
+			netif_tx_wake_queue(txq);
+	}
+
+out_unlock:
+	mmiowb();
+
+	return NETDEV_TX_OK;
+
+dma_error:
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+	dev_kfree_skb(skb);
+	tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
+	return NETDEV_TX_OK;
+}
+
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (features & NETIF_F_LOOPBACK) {
+		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+			return;
+
+		/*
+		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
+		 * loopback mode if Half-Duplex mode was negotiated earlier.
+		 */
+		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+
+		/* Enable internal MAC loopback mode */
+		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		netif_carrier_on(tp->dev);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+	} else {
+		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			return;
+
+		/* Disable internal MAC loopback mode */
+		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		/* Force link status check */
+		tg3_setup_phy(tp, 1);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+	}
+}
+
+static u32 tg3_fix_features(struct net_device *dev, u32 features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
+		features &= ~NETIF_F_ALL_TSO;
+
+	return features;
+}
+
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+	u32 changed = dev->features ^ features;
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+		tg3_set_loopback(dev, features);
+
+	return 0;
+}
+
+static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
+			       int new_mtu)
+{
+	dev->mtu = new_mtu;
+
+	if (new_mtu > ETH_DATA_LEN) {
+		if (tg3_flag(tp, 5780_CLASS)) {
+			netdev_update_features(dev);
+			tg3_flag_clear(tp, TSO_CAPABLE);
+		} else {
+			tg3_flag_set(tp, JUMBO_RING_ENABLE);
+		}
+	} else {
+		if (tg3_flag(tp, 5780_CLASS)) {
+			tg3_flag_set(tp, TSO_CAPABLE);
+			netdev_update_features(dev);
+		}
+		tg3_flag_clear(tp, JUMBO_RING_ENABLE);
+	}
+}
+
+static int tg3_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+
+	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
+		return -EINVAL;
+
+	if (!netif_running(dev)) {
+		/* We'll just catch it later when the
+		 * device is brought up.
+		 */
+		tg3_set_mtu(dev, tp, new_mtu);
+		return 0;
+	}
+
+	tg3_phy_stop(tp);
+
+	tg3_netif_stop(tp);
+
+	tg3_full_lock(tp, 1);
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+
+	tg3_set_mtu(dev, tp, new_mtu);
+
+	err = tg3_restart_hw(tp, 0);
+
+	if (!err)
+		tg3_netif_start(tp);
+
+	tg3_full_unlock(tp);
+
+	if (!err)
+		tg3_phy_start(tp);
+
+	return err;
+}
+
+static void tg3_rx_prodring_free(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
+{
+	int i;
+
+	if (tpr != &tp->napi[0].prodring) {
+		for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+		     i = (i + 1) & tp->rx_std_ring_mask)
+			tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+					tp->rx_pkt_map_sz);
+
+		if (tg3_flag(tp, JUMBO_CAPABLE)) {
+			for (i = tpr->rx_jmb_cons_idx;
+			     i != tpr->rx_jmb_prod_idx;
+			     i = (i + 1) & tp->rx_jmb_ring_mask) {
+				tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+						TG3_RX_JMB_MAP_SZ);
+			}
+		}
+
+		return;
+	}
+
+	for (i = 0; i <= tp->rx_std_ring_mask; i++)
+		tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+				tp->rx_pkt_map_sz);
+
+	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+		for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
+			tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+					TG3_RX_JMB_MAP_SZ);
+	}
+}
+
+/* Initialize rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_rx_prodring_alloc(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
+{
+	u32 i, rx_pkt_dma_sz;
+
+	tpr->rx_std_cons_idx = 0;
+	tpr->rx_std_prod_idx = 0;
+	tpr->rx_jmb_cons_idx = 0;
+	tpr->rx_jmb_prod_idx = 0;
+
+	if (tpr != &tp->napi[0].prodring) {
+		memset(&tpr->rx_std_buffers[0], 0,
+		       TG3_RX_STD_BUFF_RING_SIZE(tp));
+		if (tpr->rx_jmb_buffers)
+			memset(&tpr->rx_jmb_buffers[0], 0,
+			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
+		goto done;
+	}
+
+	/* Zero out all descriptors. */
+	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
+
+	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
+	if (tg3_flag(tp, 5780_CLASS) &&
+	    tp->dev->mtu > ETH_DATA_LEN)
+		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
+	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
+
+	/* Initialize invariants of the rings; we only set this
+	 * stuff once.  This works because the card does not
+	 * write into the rx buffer posting rings.
+	 */
+	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
+		struct tg3_rx_buffer_desc *rxd;
+
+		rxd = &tpr->rx_std[i];
+		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
+		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
+		rxd->opaque = (RXD_OPAQUE_RING_STD |
+			       (i << RXD_OPAQUE_INDEX_SHIFT));
+	}
+
+	/* Now allocate fresh SKBs for each rx ring. */
+	for (i = 0; i < tp->rx_pending; i++) {
+		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+			netdev_warn(tp->dev,
+				    "Using a smaller RX standard ring. Only "
+				    "%d out of %d buffers were allocated "
+				    "successfully\n", i, tp->rx_pending);
+			if (i == 0)
+				goto initfail;
+			tp->rx_pending = i;
+			break;
+		}
+	}
+
+	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+		goto done;
+
+	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
+
+	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
+		goto done;
+
+	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
+		struct tg3_rx_buffer_desc *rxd;
+
+		rxd = &tpr->rx_jmb[i].std;
+		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+				  RXD_FLAG_JUMBO;
+		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+		       (i << RXD_OPAQUE_INDEX_SHIFT));
+	}
+
+	for (i = 0; i < tp->rx_jumbo_pending; i++) {
+		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+			netdev_warn(tp->dev,
+				    "Using a smaller RX jumbo ring. Only %d "
+				    "out of %d buffers were allocated "
+				    "successfully\n", i, tp->rx_jumbo_pending);
+			if (i == 0)
+				goto initfail;
+			tp->rx_jumbo_pending = i;
+			break;
+		}
+	}
+
+done:
+	return 0;
+
+initfail:
+	tg3_rx_prodring_free(tp, tpr);
+	return -ENOMEM;
+}
+
+static void tg3_rx_prodring_fini(struct tg3 *tp,
+				 struct tg3_rx_prodring_set *tpr)
+{
+	kfree(tpr->rx_std_buffers);
+	tpr->rx_std_buffers = NULL;
+	kfree(tpr->rx_jmb_buffers);
+	tpr->rx_jmb_buffers = NULL;
+	if (tpr->rx_std) {
+		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+				  tpr->rx_std, tpr->rx_std_mapping);
+		tpr->rx_std = NULL;
+	}
+	if (tpr->rx_jmb) {
+		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+				  tpr->rx_jmb, tpr->rx_jmb_mapping);
+		tpr->rx_jmb = NULL;
+	}
+}
+
+static int tg3_rx_prodring_init(struct tg3 *tp,
+				struct tg3_rx_prodring_set *tpr)
+{
+	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
+				      GFP_KERNEL);
+	if (!tpr->rx_std_buffers)
+		return -ENOMEM;
+
+	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+					 TG3_RX_STD_RING_BYTES(tp),
+					 &tpr->rx_std_mapping,
+					 GFP_KERNEL);
+	if (!tpr->rx_std)
+		goto err_out;
+
+	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
+		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
+					      GFP_KERNEL);
+		if (!tpr->rx_jmb_buffers)
+			goto err_out;
+
+		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+						 TG3_RX_JMB_RING_BYTES(tp),
+						 &tpr->rx_jmb_mapping,
+						 GFP_KERNEL);
+		if (!tpr->rx_jmb)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	tg3_rx_prodring_fini(tp, tpr);
+	return -ENOMEM;
+}
+
+/* Free up pending packets in all rx/tx rings.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock is not held and we are not
+ * in an interrupt context and thus may sleep.
+ */
+static void tg3_free_rings(struct tg3 *tp)
+{
+	int i, j;
+
+	for (j = 0; j < tp->irq_cnt; j++) {
+		struct tg3_napi *tnapi = &tp->napi[j];
+
+		tg3_rx_prodring_free(tp, &tnapi->prodring);
+
+		if (!tnapi->tx_buffers)
+			continue;
+
+		for (i = 0; i < TG3_TX_RING_SIZE; i++) {
+			struct sk_buff *skb = tnapi->tx_buffers[i].skb;
+
+			if (!skb)
+				continue;
+
+			tg3_tx_skb_unmap(tnapi, i, skb_shinfo(skb)->nr_frags);
+
+			dev_kfree_skb_any(skb);
+		}
+	}
+}
+
+/* Initialize tx/rx rings for packet processing.
+ *
+ * The chip has been shut down and the driver detached from
+ * the networking, so no interrupts or new tx packets will
+ * end up in the driver.  tp->{tx,}lock are held and thus
+ * we may not sleep.
+ */
+static int tg3_init_rings(struct tg3 *tp)
+{
+	int i;
+
+	/* Free up all the SKBs. */
+	tg3_free_rings(tp);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tnapi->last_tag = 0;
+		tnapi->last_irq_tag = 0;
+		tnapi->hw_status->status = 0;
+		tnapi->hw_status->status_tag = 0;
+		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+		tnapi->tx_prod = 0;
+		tnapi->tx_cons = 0;
+		if (tnapi->tx_ring)
+			memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+		tnapi->rx_rcb_ptr = 0;
+		if (tnapi->rx_rcb)
+			memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+		if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+			tg3_free_rings(tp);
+			return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.
+ */
+static void tg3_free_consistent(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tnapi->tx_ring) {
+			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
+				tnapi->tx_ring, tnapi->tx_desc_mapping);
+			tnapi->tx_ring = NULL;
+		}
+
+		kfree(tnapi->tx_buffers);
+		tnapi->tx_buffers = NULL;
+
+		if (tnapi->rx_rcb) {
+			dma_free_coherent(&tp->pdev->dev,
+					  TG3_RX_RCB_RING_BYTES(tp),
+					  tnapi->rx_rcb,
+					  tnapi->rx_rcb_mapping);
+			tnapi->rx_rcb = NULL;
+		}
+
+		tg3_rx_prodring_fini(tp, &tnapi->prodring);
+
+		if (tnapi->hw_status) {
+			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+					  tnapi->hw_status,
+					  tnapi->status_mapping);
+			tnapi->hw_status = NULL;
+		}
+	}
+
+	if (tp->hw_stats) {
+		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+				  tp->hw_stats, tp->stats_mapping);
+		tp->hw_stats = NULL;
+	}
+}
+
+/*
+ * Must not be invoked with interrupt sources disabled and
+ * the hardware shut down.  Can sleep.
+ */
+static int tg3_alloc_consistent(struct tg3 *tp)
+{
+	int i;
+
+	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+					  sizeof(struct tg3_hw_stats),
+					  &tp->stats_mapping,
+					  GFP_KERNEL);
+	if (!tp->hw_stats)
+		goto err_out;
+
+	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		struct tg3_hw_status *sblk;
+
+		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+						      TG3_HW_STATUS_SIZE,
+						      &tnapi->status_mapping,
+						      GFP_KERNEL);
+		if (!tnapi->hw_status)
+			goto err_out;
+
+		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+		sblk = tnapi->hw_status;
+
+		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
+			goto err_out;
+
+		/* If multivector TSS is enabled, vector 0 does not handle
+		 * tx interrupts.  Don't allocate any resources for it.
+		 */
+		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
+		    (i && tg3_flag(tp, ENABLE_TSS))) {
+			tnapi->tx_buffers = kzalloc(
+					       sizeof(struct tg3_tx_ring_info) *
+					       TG3_TX_RING_SIZE, GFP_KERNEL);
+			if (!tnapi->tx_buffers)
+				goto err_out;
+
+			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+							    TG3_TX_RING_BYTES,
+							&tnapi->tx_desc_mapping,
+							    GFP_KERNEL);
+			if (!tnapi->tx_ring)
+				goto err_out;
+		}
+
+		/*
+		 * When RSS is enabled, the status block format changes
+		 * slightly.  The "rx_jumbo_consumer", "reserved",
+		 * and "rx_mini_consumer" members get mapped to the
+		 * other three rx return ring producer indexes.
+		 */
+		switch (i) {
+		default:
+			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
+			break;
+		case 2:
+			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
+			break;
+		case 3:
+			tnapi->rx_rcb_prod_idx = &sblk->reserved;
+			break;
+		case 4:
+			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
+			break;
+		}
+
+		/*
+		 * If multivector RSS is enabled, vector 0 does not handle
+		 * rx or tx interrupts.  Don't allocate any resources for it.
+		 */
+		if (!i && tg3_flag(tp, ENABLE_RSS))
+			continue;
+
+		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+						   TG3_RX_RCB_RING_BYTES(tp),
+						   &tnapi->rx_rcb_mapping,
+						   GFP_KERNEL);
+		if (!tnapi->rx_rcb)
+			goto err_out;
+
+		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+	}
+
+	return 0;
+
+err_out:
+	tg3_free_consistent(tp);
+	return -ENOMEM;
+}
+
+#define MAX_WAIT_CNT 1000
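+/* The wait loops below poll in 100us steps, so each is bounded at ~100ms. */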
+
+/* To stop a block, clear the enable bit and poll till it
+ * clears.  tp->lock is held.
+ */
+static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
+{
+	unsigned int i;
+	u32 val;
+
+	if (tg3_flag(tp, 5705_PLUS)) {
+		switch (ofs) {
+		case RCVLSC_MODE:
+		case DMAC_MODE:
+		case MBFREE_MODE:
+		case BUFMGR_MODE:
+		case MEMARB_MODE:
+			/* We can't enable/disable these bits of the
+			 * 5705/5750, just say success.
+			 */
+			return 0;
+
+		default:
+			break;
+		}
+	}
+
+	val = tr32(ofs);
+	val &= ~enable_bit;
+	tw32_f(ofs, val);
+
+	for (i = 0; i < MAX_WAIT_CNT; i++) {
+		udelay(100);
+		val = tr32(ofs);
+		if ((val & enable_bit) == 0)
+			break;
+	}
+
+	if (i == MAX_WAIT_CNT && !silent) {
+		dev_err(&tp->pdev->dev,
+			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
+			ofs, enable_bit);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_abort_hw(struct tg3 *tp, int silent)
+{
+	int i, err;
+
+	tg3_disable_ints(tp);
+
+	tp->rx_mode &= ~RX_MODE_ENABLE;
+	tw32_f(MAC_RX_MODE, tp->rx_mode);
+	udelay(10);
+
+	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
+
+	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
+
+	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+	tw32_f(MAC_MODE, tp->mac_mode);
+	udelay(40);
+
+	tp->tx_mode &= ~TX_MODE_ENABLE;
+	tw32_f(MAC_TX_MODE, tp->tx_mode);
+
+	for (i = 0; i < MAX_WAIT_CNT; i++) {
+		udelay(100);
+		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
+			break;
+	}
+	if (i >= MAX_WAIT_CNT) {
+		dev_err(&tp->pdev->dev,
+			"%s timed out, TX_MODE_ENABLE will not clear "
+			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
+		err |= -ENODEV;
+	}
+
+	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
+
+	tw32(FTQ_RESET, 0xffffffff);
+	tw32(FTQ_RESET, 0x00000000);
+
+	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
+	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		if (tnapi->hw_status)
+			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+	}
+	if (tp->hw_stats)
+		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+
+	return err;
+}
+
+static void tg3_ape_send_event(struct tg3 *tp, u32 event)
+{
+	int i;
+	u32 apedata;
+
+	/* NCSI does not support APE events */
+	if (tg3_flag(tp, APE_HAS_NCSI))
+		return;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+	if (apedata != APE_SEG_SIG_MAGIC)
+		return;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+	if (!(apedata & APE_FW_STATUS_READY))
+		return;
+
+	/* Wait for up to 1 millisecond for APE to service previous event. */
+	for (i = 0; i < 10; i++) {
+		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
+			return;
+
+		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
+
+		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
+					event | APE_EVENT_STATUS_EVENT_PENDING);
+
+		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
+
+		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+			break;
+
+		udelay(100);
+	}
+
+	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
+		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
+}
+
+static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
+{
+	u32 event;
+	u32 apedata;
+
+	if (!tg3_flag(tp, ENABLE_APE))
+		return;
+
+	switch (kind) {
+	case RESET_KIND_INIT:
+		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
+				APE_HOST_SEG_SIG_MAGIC);
+		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
+				APE_HOST_SEG_LEN_MAGIC);
+		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
+		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
+		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
+			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
+		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
+				APE_HOST_BEHAV_NO_PHYLOCK);
+		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
+				    TG3_APE_HOST_DRVR_STATE_START);
+
+		event = APE_EVENT_STATUS_STATE_START;
+		break;
+	case RESET_KIND_SHUTDOWN:
+		/* With the interface we are currently using,
+		 * APE does not track driver state.  Wiping
+		 * out the HOST SEGMENT SIGNATURE forces
+		 * the APE to assume OS absent status.
+		 */
+		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
+
+		if (device_may_wakeup(&tp->pdev->dev) &&
+		    tg3_flag(tp, WOL_ENABLE)) {
+			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
+					    TG3_APE_HOST_WOL_SPEED_AUTO);
+			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
+		} else
+			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
+
+		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
+
+		event = APE_EVENT_STATUS_STATE_UNLOAD;
+		break;
+	case RESET_KIND_SUSPEND:
+		event = APE_EVENT_STATUS_STATE_SUSPEND;
+		break;
+	default:
+		return;
+	}
+
+	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
+
+	tg3_ape_send_event(tp, event);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
+{
+	tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+		      NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+
+	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+		switch (kind) {
+		case RESET_KIND_INIT:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_START);
+			break;
+
+		case RESET_KIND_SHUTDOWN:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_UNLOAD);
+			break;
+
+		case RESET_KIND_SUSPEND:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_SUSPEND);
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	if (kind == RESET_KIND_INIT ||
+	    kind == RESET_KIND_SUSPEND)
+		tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
+{
+	if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
+		switch (kind) {
+		case RESET_KIND_INIT:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_START_DONE);
+			break;
+
+		case RESET_KIND_SHUTDOWN:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_UNLOAD_DONE);
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	if (kind == RESET_KIND_SHUTDOWN)
+		tg3_ape_driver_state_change(tp, kind);
+}
+
+/* tp->lock is held. */
+static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
+{
+	if (tg3_flag(tp, ENABLE_ASF)) {
+		switch (kind) {
+		case RESET_KIND_INIT:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_START);
+			break;
+
+		case RESET_KIND_SHUTDOWN:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_UNLOAD);
+			break;
+
+		case RESET_KIND_SUSPEND:
+			tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
+				      DRV_STATE_SUSPEND);
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+static int tg3_poll_fw(struct tg3 *tp)
+{
+	int i;
+	u32 val;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		/* Wait up to 20ms for init done. */
+		for (i = 0; i < 200; i++) {
+			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
+				return 0;
+			udelay(100);
+		}
+		return -ENODEV;
+	}
+
+	/* Wait for firmware initialization to complete.  Bootcode
+	 * signals completion by writing back the one's complement of
+	 * the magic value to the firmware mailbox.
+	 */
+	for (i = 0; i < 100000; i++) {
+		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+			break;
+		udelay(10);
+	}
+
+	/* Chip might not be fitted with firmware.  Some Sun onboard
+	 * parts are configured like that.  So don't signal the timeout
+	 * of the above loop as an error, but do report the lack of
+	 * running firmware once.
+	 */
+	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
+		tg3_flag_set(tp, NO_FWARE_REPORTED);
+
+		netdev_info(tp->dev, "No firmware running\n");
+	}
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+		/* The 57765 A0 needs a little more
+		 * time to do some important work.
+		 */
+		mdelay(10);
+	}
+
+	return 0;
+}
+
+/* Save PCI command register before chip reset */
+static void tg3_save_pci_state(struct tg3 *tp)
+{
+	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
+}
+
+/* Restore PCI state after chip reset */
+static void tg3_restore_pci_state(struct tg3 *tp)
+{
+	u32 val;
+
+	/* Re-enable indirect register accesses. */
+	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+			       tp->misc_host_ctrl);
+
+	/* Set MAX PCI retry to zero. */
+	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+	    tg3_flag(tp, PCIX_MODE))
+		val |= PCISTATE_RETRY_SAME_DMA;
+	/* Allow reads and writes to the APE register and memory space. */
+	if (tg3_flag(tp, ENABLE_APE))
+		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+		       PCISTATE_ALLOW_APE_SHMEM_WR |
+		       PCISTATE_ALLOW_APE_PSPACE_WR;
+	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
+
+	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
+		if (tg3_flag(tp, PCI_EXPRESS))
+			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+		else {
+			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+					      tp->pci_cacheline_sz);
+			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+					      tp->pci_lat_timer);
+		}
+	}
+
+	/* Make sure PCI-X relaxed ordering bit is clear. */
+	if (tg3_flag(tp, PCIX_MODE)) {
+		u16 pcix_cmd;
+
+		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				     &pcix_cmd);
+		pcix_cmd &= ~PCI_X_CMD_ERO;
+		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				      pcix_cmd);
+	}
+
+	if (tg3_flag(tp, 5780_CLASS)) {
+
+		/* Chip reset on 5780 will reset MSI enable bit,
+		 * so need to restore it.
+		 */
+		if (tg3_flag(tp, USING_MSI)) {
+			u16 ctrl;
+
+			pci_read_config_word(tp->pdev,
+					     tp->msi_cap + PCI_MSI_FLAGS,
+					     &ctrl);
+			pci_write_config_word(tp->pdev,
+					      tp->msi_cap + PCI_MSI_FLAGS,
+					      ctrl | PCI_MSI_FLAGS_ENABLE);
+			val = tr32(MSGINT_MODE);
+			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
+		}
+	}
+}
+
+static void tg3_stop_fw(struct tg3 *);
+
+/* tp->lock is held. */
+static int tg3_chip_reset(struct tg3 *tp)
+{
+	u32 val;
+	void (*write_op)(struct tg3 *, u32, u32);
+	int i, err;
+
+	tg3_nvram_lock(tp);
+
+	tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
+
+	/* No matching tg3_nvram_unlock() after this because
+	 * chip reset below will undo the nvram lock.
+	 */
+	tp->nvram_lock_cnt = 0;
+
+	/* GRC_MISC_CFG core clock reset will clear the memory
+	 * enable bit in PCI register 4 and the MSI enable bit
+	 * on some chips, so we save relevant registers here.
+	 */
+	tg3_save_pci_state(tp);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    tg3_flag(tp, 5755_PLUS))
+		tw32(GRC_FASTBOOT_PC, 0);
+
+	/*
+	 * We must avoid the readl() that normally takes place.
+	 * It locks machines, causes machine checks, and other
+	 * fun things.  So, temporarily disable the 5701
+	 * hardware workaround, while we do the reset.
+	 */
+	write_op = tp->write32;
+	if (write_op == tg3_write_flush_reg32)
+		tp->write32 = tg3_write32;
+
+	/* Prevent the irq handler from reading or writing PCI registers
+	 * during chip reset when the memory enable bit in the PCI command
+	 * register may be cleared.  The chip does not generate interrupt
+	 * at this time, but the irq handler may still be called due to irq
+	 * sharing or irqpoll.
+	 */
+	tg3_flag_set(tp, CHIP_RESETTING);
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		if (tnapi->hw_status) {
+			tnapi->hw_status->status = 0;
+			tnapi->hw_status->status_tag = 0;
+		}
+		tnapi->last_tag = 0;
+		tnapi->last_irq_tag = 0;
+	}
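+	/* Make sure the CHIP_RESETTING flag and the cleared status
+	 * blocks are visible to other CPUs before we synchronize with
+	 * the irq handlers below.
+	 */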
+	smp_mb();
+
+	for (i = 0; i < tp->irq_cnt; i++)
+		synchronize_irq(tp->napi[i].irq_vec);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+	}
+
+	/* do the reset */
+	val = GRC_MISC_CFG_CORECLK_RESET;
+
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		/* Force PCIe 1.0a mode */
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+		    !tg3_flag(tp, 57765_PLUS) &&
+		    tr32(TG3_PCIE_PHY_TSTCTL) ==
+		    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
+			tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
+
+		if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
+			tw32(GRC_MISC_CFG, (1 << 29));
+			val |= (1 << 29);
+		}
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
+		tw32(GRC_VCPU_EXT_CTRL,
+		     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
+	}
+
+	/* Manage gphy power for all CPMU absent PCIe devices. */
+	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
+		val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
+
+	tw32(GRC_MISC_CFG, val);
+
+	/* restore 5701 hardware bug workaround write method */
+	tp->write32 = write_op;
+
+	/* Unfortunately, we have to delay before the PCI read back.
+	 * Some 575X chips even will not respond to a PCI cfg access
+	 * when the reset command is given to the chip.
+	 *
+	 * How do these hardware designers expect things to work
+	 * properly if the PCI write is posted for a long period
+	 * of time?  There must always be some way to read a register
+	 * back and thereby push out the posted write that performs
+	 * the reset.
+	 *
+	 * For most tg3 variants the trick below works.
+	 * Ho hum...
+	 */
+	udelay(120);
+
+	/* Flush PCI posted writes.  The normal MMIO registers
+	 * are inaccessible at this time so this is the only
+	 * way to do this reliably (actually, this is no longer
+	 * the case, see above).  I tried to use indirect
+	 * register read/write but this upset some 5701 variants.
+	 */
+	pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
+
+	udelay(120);
+
+	if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
+		u16 val16;
+
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
+			int i;
+			u32 cfg_val;
+
+			/* Wait for link training to complete.  */
+			for (i = 0; i < 5000; i++)
+				udelay(100);
+
+			pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
+			pci_write_config_dword(tp->pdev, 0xc4,
+					       cfg_val | (1 << 15));
+		}
+
+		/* Clear the "no snoop" and "relaxed ordering" bits. */
+		pci_read_config_word(tp->pdev,
+				     pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
+				     &val16);
+		val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
+			   PCI_EXP_DEVCTL_NOSNOOP_EN);
+		/*
+		 * Older PCIe devices only support the 128 byte
+		 * MPS setting.  Enforce the restriction.
+		 */
+		if (!tg3_flag(tp, CPMU_PRESENT))
+			val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
+		pci_write_config_word(tp->pdev,
+				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
+				      val16);
+
+		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+
+		/* Clear error status */
+		pci_write_config_word(tp->pdev,
+				      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
+				      PCI_EXP_DEVSTA_CED |
+				      PCI_EXP_DEVSTA_NFED |
+				      PCI_EXP_DEVSTA_FED |
+				      PCI_EXP_DEVSTA_URD);
+	}
+
+	tg3_restore_pci_state(tp);
+
+	tg3_flag_clear(tp, CHIP_RESETTING);
+	tg3_flag_clear(tp, ERROR_PROCESSED);
+
+	val = 0;
+	if (tg3_flag(tp, 5780_CLASS))
+		val = tr32(MEMARB_MODE);
+	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
+		tg3_stop_fw(tp);
+		tw32(0x5000, 0x400);
+	}
+
+	tw32(GRC_MODE, tp->grc_mode);
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
+		val = tr32(0xc4);
+
+		tw32(0xc4, val | (1 << 15));
+	}
+
+	if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+		tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
+			tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
+		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+	}
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
+		val = tp->mac_mode;
+	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
+		val = tp->mac_mode;
+	} else
+		val = 0;
+
+	tw32_f(MAC_MODE, val);
+	udelay(40);
+
+	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
+
+	err = tg3_poll_fw(tp);
+	if (err)
+		return err;
+
+	tg3_mdio_start(tp);
+
+	if (tg3_flag(tp, PCI_EXPRESS) &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+	    !tg3_flag(tp, 57765_PLUS)) {
+		val = tr32(0x7c00);
+
+		tw32(0x7c00, val | (1 << 25));
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+		val = tr32(TG3_CPMU_CLCK_ORIDE);
+		tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
+	}
+
+	/* Reprobe ASF enable state.  */
+	tg3_flag_clear(tp, ENABLE_ASF);
+	tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
+	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+		u32 nic_cfg;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+			tg3_flag_set(tp, ENABLE_ASF);
+			tp->last_event_jiffies = jiffies;
+			if (tg3_flag(tp, 5750_PLUS))
+				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+		}
+	}
+
+	return 0;
+}
+
+/* tp->lock is held. */
+static void tg3_stop_fw(struct tg3 *tp)
+{
+	if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+		/* Wait for RX cpu to ACK the previous event. */
+		tg3_wait_for_event_ack(tp);
+
+		tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
+
+		tg3_generate_fw_event(tp);
+
+		/* Wait for RX cpu to ACK this event. */
+		tg3_wait_for_event_ack(tp);
+	}
+}
+
+/* tp->lock is held. */
+static int tg3_halt(struct tg3 *tp, int kind, int silent)
+{
+	int err;
+
+	tg3_stop_fw(tp);
+
+	tg3_write_sig_pre_reset(tp, kind);
+
+	tg3_abort_hw(tp, silent);
+	err = tg3_chip_reset(tp);
+
+	__tg3_set_mac_addr(tp, 0);
+
+	tg3_write_sig_legacy(tp, kind);
+	tg3_write_sig_post_reset(tp, kind);
+
+	return err;
+}
+
+#define RX_CPU_SCRATCH_BASE	0x30000
+#define RX_CPU_SCRATCH_SIZE	0x04000
+#define TX_CPU_SCRATCH_BASE	0x34000
+#define TX_CPU_SCRATCH_SIZE	0x04000
+
+/* tp->lock is held. */
+static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
+{
+	int i;
+
+	BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		u32 val = tr32(GRC_VCPU_EXT_CTRL);
+
+		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
+		return 0;
+	}
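+	/* Halt the CPU by repeatedly writing CPU_MODE_HALT until the
+	 * mode register reads it back; the RX CPU additionally gets a
+	 * final flushed halt write and a short settling delay.
+	 */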
+	if (offset == RX_CPU_BASE) {
+		for (i = 0; i < 10000; i++) {
+			tw32(offset + CPU_STATE, 0xffffffff);
+			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
+			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+				break;
+		}
+
+		tw32(offset + CPU_STATE, 0xffffffff);
+		tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
+		udelay(10);
+	} else {
+		for (i = 0; i < 10000; i++) {
+			tw32(offset + CPU_STATE, 0xffffffff);
+			tw32(offset + CPU_MODE,  CPU_MODE_HALT);
+			if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
+				break;
+		}
+	}
+
+	if (i >= 10000) {
+		netdev_err(tp->dev, "%s timed out, %s CPU\n",
+			   __func__, offset == RX_CPU_BASE ? "RX" : "TX");
+		return -ENODEV;
+	}
+
+	/* Clear firmware's nvram arbitration. */
+	if (tg3_flag(tp, NVRAM))
+		tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
+	return 0;
+}
+
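+/* Describes one firmware image: its load (start) address in NIC memory,
+ * its length in bytes, and a pointer to the big-endian image data.
+ */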
+struct fw_info {
+	unsigned int fw_base;
+	unsigned int fw_len;
+	const __be32 *fw_data;
+};
+
+/* tp->lock is held. */
+static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
+				 int cpu_scratch_size, struct fw_info *info)
+{
+	int err, lock_err, i;
+	void (*write_op)(struct tg3 *, u32, u32);
+
+	if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
+		netdev_err(tp->dev,
+			   "%s: Trying to load TX cpu firmware which is 5705\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	if (tg3_flag(tp, 5705_PLUS))
+		write_op = tg3_write_mem;
+	else
+		write_op = tg3_write_indirect_reg32;
+
+	/* It is possible that bootcode is still loading at this point.
+	 * Get the nvram lock first before halting the cpu.
+	 */
+	lock_err = tg3_nvram_lock(tp);
+	err = tg3_halt_cpu(tp, cpu_base);
+	if (!lock_err)
+		tg3_nvram_unlock(tp);
+	if (err)
+		goto out;
+
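+	/* Zero the CPU scratch area, keep the CPU halted, then copy the
+	 * big-endian firmware image into scratch space word by word.
+	 */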
+	for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
+		write_op(tp, cpu_scratch_base + i, 0);
+	tw32(cpu_base + CPU_STATE, 0xffffffff);
+	tw32(cpu_base + CPU_MODE, tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
+	for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
+		write_op(tp, (cpu_scratch_base +
+			      (info->fw_base & 0xffff) +
+			      (i * sizeof(u32))),
+			      be32_to_cpu(info->fw_data[i]));
+
+	err = 0;
+
+out:
+	return err;
+}
+
+/* tp->lock is held. */
+static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
+{
+	struct fw_info info;
+	const __be32 *fw_data;
+	int err, i;
+
+	fw_data = (void *)tp->fw->data;
+
+	/* The firmware blob starts with version numbers, followed by
+	 * the start address and length.  We use the complete length:
+	 * length = end_address_of_bss - start_address_of_text.
+	 * The remainder is the blob, to be loaded contiguously
+	 * from the start address.
+	 */
+
+	info.fw_base = be32_to_cpu(fw_data[1]);
+	info.fw_len = tp->fw->size - 12;
+	info.fw_data = &fw_data[3];
+
+	err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
+				    RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
+				    &info);
+	if (err)
+		return err;
+
+	err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
+				    TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
+				    &info);
+	if (err)
+		return err;
+
+	/* Now startup only the RX cpu. */
+	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+	tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+
+	for (i = 0; i < 5; i++) {
+		if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
+			break;
+		tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+		tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
+		tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
+		udelay(1000);
+	}
+	if (i >= 5) {
+		netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
+			   "should be %08x\n", __func__,
+			   tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
+		return -ENODEV;
+	}
+	tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
+	tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
+
+	return 0;
+}
+
+/* tp->lock is held. */
+static int tg3_load_tso_firmware(struct tg3 *tp)
+{
+	struct fw_info info;
+	const __be32 *fw_data;
+	unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
+	int err, i;
+
+	if (tg3_flag(tp, HW_TSO_1) ||
+	    tg3_flag(tp, HW_TSO_2) ||
+	    tg3_flag(tp, HW_TSO_3))
+		return 0;
+
+	fw_data = (void *)tp->fw->data;
+
+	/* The firmware blob starts with version numbers, followed by
+	 * the start address and length.  We use the complete length:
+	 * length = end_address_of_bss - start_address_of_text.
+	 * The remainder is the blob, to be loaded contiguously
+	 * from the start address.
+	 */
+
+	info.fw_base = be32_to_cpu(fw_data[1]);
+	cpu_scratch_size = tp->fw_len;
+	info.fw_len = tp->fw->size - 12;
+	info.fw_data = &fw_data[3];
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+		cpu_base = RX_CPU_BASE;
+		cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
+	} else {
+		cpu_base = TX_CPU_BASE;
+		cpu_scratch_base = TX_CPU_SCRATCH_BASE;
+		cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
+	}
+
+	err = tg3_load_firmware_cpu(tp, cpu_base,
+				    cpu_scratch_base, cpu_scratch_size,
+				    &info);
+	if (err)
+		return err;
+
+	/* Now startup the cpu. */
+	tw32(cpu_base + CPU_STATE, 0xffffffff);
+	tw32_f(cpu_base + CPU_PC, info.fw_base);
+
+	for (i = 0; i < 5; i++) {
+		if (tr32(cpu_base + CPU_PC) == info.fw_base)
+			break;
+		tw32(cpu_base + CPU_STATE, 0xffffffff);
+		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
+		tw32_f(cpu_base + CPU_PC, info.fw_base);
+		udelay(1000);
+	}
+	if (i >= 5) {
+		netdev_err(tp->dev,
+			   "%s fails to set CPU PC, is %08x should be %08x\n",
+			   __func__, tr32(cpu_base + CPU_PC), info.fw_base);
+		return -ENODEV;
+	}
+	tw32(cpu_base + CPU_STATE, 0xffffffff);
+	tw32_f(cpu_base + CPU_MODE,  0x00000000);
+	return 0;
+}
+
+static int tg3_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct sockaddr *addr = p;
+	int err = 0, skip_mac_1 = 0;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	if (!netif_running(dev))
+		return 0;
+
+	if (tg3_flag(tp, ENABLE_ASF)) {
+		u32 addr0_high, addr0_low, addr1_high, addr1_low;
+
+		addr0_high = tr32(MAC_ADDR_0_HIGH);
+		addr0_low = tr32(MAC_ADDR_0_LOW);
+		addr1_high = tr32(MAC_ADDR_1_HIGH);
+		addr1_low = tr32(MAC_ADDR_1_LOW);
+
+		/* Skip MAC addr 1 if ASF is using it. */
+		if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
+		    !(addr1_high == 0 && addr1_low == 0))
+			skip_mac_1 = 1;
+	}
+	spin_lock_bh(&tp->lock);
+	__tg3_set_mac_addr(tp, skip_mac_1);
+	spin_unlock_bh(&tp->lock);
+
+	return err;
+}
+
+/* tp->lock is held. */
+static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
+			   dma_addr_t mapping, u32 maxlen_flags,
+			   u32 nic_addr)
+{
+	tg3_write_mem(tp,
+		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
+		      ((u64) mapping >> 32));
+	tg3_write_mem(tp,
+		      (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
+		      ((u64) mapping & 0xffffffff));
+	tg3_write_mem(tp,
+		      (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
+		       maxlen_flags);
+
+	if (!tg3_flag(tp, 5705_PLUS))
+		tg3_write_mem(tp,
+			      (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
+			      nic_addr);
+}
+
+static void __tg3_set_rx_mode(struct net_device *);
+static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
+{
+	int i;
+
+	if (!tg3_flag(tp, ENABLE_TSS)) {
+		tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+		tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+		tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+	} else {
+		tw32(HOSTCC_TXCOL_TICKS, 0);
+		tw32(HOSTCC_TXMAX_FRAMES, 0);
+		tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+	}
+
+	if (!tg3_flag(tp, ENABLE_RSS)) {
+		tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+		tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+		tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+	} else {
+		tw32(HOSTCC_RXCOL_TICKS, 0);
+		tw32(HOSTCC_RXMAX_FRAMES, 0);
+		tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
+	}
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		u32 val = ec->stats_block_coalesce_usecs;
+
+		tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+		tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
+		if (!netif_carrier_ok(tp->dev))
+			val = 0;
+
+		tw32(HOSTCC_STAT_COAL_TICKS, val);
+	}
+
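+	/* The per-vector coalescing registers are laid out in 0x18-byte
+	 * strides starting at the VEC1 offsets.
+	 */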
+	for (i = 0; i < tp->irq_cnt - 1; i++) {
+		u32 reg;
+
+		reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
+		tw32(reg, ec->rx_coalesce_usecs);
+		reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
+		tw32(reg, ec->rx_max_coalesced_frames);
+		reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
+		tw32(reg, ec->rx_max_coalesced_frames_irq);
+
+		if (tg3_flag(tp, ENABLE_TSS)) {
+			reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_coalesce_usecs);
+			reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_max_coalesced_frames);
+			reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+			tw32(reg, ec->tx_max_coalesced_frames_irq);
+		}
+	}
+
+	for (; i < tp->irq_max - 1; i++) {
+		tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
+		tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
+		tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+
+		if (tg3_flag(tp, ENABLE_TSS)) {
+			tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+			tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+			tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+		}
+	}
+}
+
+/* tp->lock is held. */
+static void tg3_rings_reset(struct tg3 *tp)
+{
+	int i;
+	u32 stblk, txrcb, rxrcb, limit;
+	struct tg3_napi *tnapi = &tp->napi[0];
+
+	/* Disable all transmit rings but the first. */
+	if (!tg3_flag(tp, 5705_PLUS))
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
+	else if (tg3_flag(tp, 5717_PLUS))
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
+	else
+		limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+
+	for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
+	     txrcb < limit; txrcb += TG3_BDINFO_SIZE)
+		tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
+			      BDINFO_FLAGS_DISABLED);
+
+
+	/* Disable all receive return rings but the first. */
+	if (tg3_flag(tp, 5717_PLUS))
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
+	else if (!tg3_flag(tp, 5705_PLUS))
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
+	else
+		limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+
+	for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
+	     rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
+		tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
+			      BDINFO_FLAGS_DISABLED);
+
+	/* Disable interrupts */
+	tw32_mailbox_f(tp->napi[0].int_mbox, 1);
+	tp->napi[0].chk_msi_cnt = 0;
+	tp->napi[0].last_rx_cons = 0;
+	tp->napi[0].last_tx_cons = 0;
+
+	/* Zero mailbox registers. */
+	if (tg3_flag(tp, SUPPORT_MSIX)) {
+		for (i = 1; i < tp->irq_max; i++) {
+			tp->napi[i].tx_prod = 0;
+			tp->napi[i].tx_cons = 0;
+			if (tg3_flag(tp, ENABLE_TSS))
+				tw32_mailbox(tp->napi[i].prodmbox, 0);
+			tw32_rx_mbox(tp->napi[i].consmbox, 0);
+			tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+			tp->napi[i].chk_msi_cnt = 0;
+			tp->napi[i].last_rx_cons = 0;
+			tp->napi[i].last_tx_cons = 0;
+		}
+		if (!tg3_flag(tp, ENABLE_TSS))
+			tw32_mailbox(tp->napi[0].prodmbox, 0);
+	} else {
+		tp->napi[0].tx_prod = 0;
+		tp->napi[0].tx_cons = 0;
+		tw32_mailbox(tp->napi[0].prodmbox, 0);
+		tw32_rx_mbox(tp->napi[0].consmbox, 0);
+	}
+
+	/* Make sure the NIC-based send BD rings are disabled. */
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+		for (i = 0; i < 16; i++)
+			tw32_tx_mbox(mbox + i * 8, 0);
+	}
+
+	txrcb = NIC_SRAM_SEND_RCB;
+	rxrcb = NIC_SRAM_RCV_RET_RCB;
+
+	/* Clear status block in ram. */
+	memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+	/* Set status block DMA address */
+	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+	     ((u64) tnapi->status_mapping >> 32));
+	tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+	     ((u64) tnapi->status_mapping & 0xffffffff));
+
+	if (tnapi->tx_ring) {
+		tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+			       (TG3_TX_RING_SIZE <<
+				BDINFO_FLAGS_MAXLEN_SHIFT),
+			       NIC_SRAM_TX_BUFFER_DESC);
+		txrcb += TG3_BDINFO_SIZE;
+	}
+
+	if (tnapi->rx_rcb) {
+		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+			       (tp->rx_ret_ring_mask + 1) <<
+				BDINFO_FLAGS_MAXLEN_SHIFT, 0);
+		rxrcb += TG3_BDINFO_SIZE;
+	}
+
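+	/* Program the status block DMA address for each additional
+	 * vector; the per-vector register pairs are 8 bytes apart,
+	 * starting at HOSTCC_STATBLCK_RING1.
+	 */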
+	stblk = HOSTCC_STATBLCK_RING1;
+
+	for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
+		u64 mapping = (u64)tnapi->status_mapping;
+		tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
+		tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+
+		/* Clear status block in ram. */
+		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+		if (tnapi->tx_ring) {
+			tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+				       (TG3_TX_RING_SIZE <<
+					BDINFO_FLAGS_MAXLEN_SHIFT),
+				       NIC_SRAM_TX_BUFFER_DESC);
+			txrcb += TG3_BDINFO_SIZE;
+		}
+
+		tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+			       ((tp->rx_ret_ring_mask + 1) <<
+				BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+
+		stblk += 8;
+		rxrcb += TG3_BDINFO_SIZE;
+	}
+}
+
+static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
+{
+	u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
+
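+	/* Replenish the standard ring when it drains to the smaller of
+	 * half the on-chip BD cache and one eighth of the host ring.
+	 */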
+	if (!tg3_flag(tp, 5750_PLUS) ||
+	    tg3_flag(tp, 5780_CLASS) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
+		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
+	else
+		bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
+
+	nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
+	host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
+
+	val = min(nic_rep_thresh, host_rep_thresh);
+	tw32(RCVBDI_STD_THRESH, val);
+
+	if (tg3_flag(tp, 57765_PLUS))
+		tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
+
+	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
+		return;
+
+	if (!tg3_flag(tp, 5705_PLUS))
+		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
+	else
+		bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+
+	host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
+
+	val = min(bdcache_maxcnt / 2, host_rep_thresh);
+	tw32(RCVBDI_JUMBO_THRESH, val);
+
+	if (tg3_flag(tp, 57765_PLUS))
+		tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
+}
+
+/* tp->lock is held. */
+static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+{
+	u32 val, rdmac_mode;
+	int i, err, limit;
+	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
+	tg3_disable_ints(tp);
+
+	tg3_stop_fw(tp);
+
+	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
+
+	if (tg3_flag(tp, INIT_COMPLETE))
+		tg3_abort_hw(tp, 1);
+
+	/* Enable MAC control of LPI */
+	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
+		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
+
+		tw32_f(TG3_CPMU_EEE_CTRL,
+		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+		      TG3_CPMU_EEEMD_LPI_IN_TX |
+		      TG3_CPMU_EEEMD_LPI_IN_RX |
+		      TG3_CPMU_EEEMD_EEE_ENABLE;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+		if (tg3_flag(tp, ENABLE_APE))
+			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+		tw32_f(TG3_CPMU_EEE_MODE, val);
+
+		tw32_f(TG3_CPMU_EEE_DBTMR1,
+		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
+
+		tw32_f(TG3_CPMU_EEE_DBTMR2,
+		       TG3_CPMU_DBTMR2_APE_TX_2047US |
+		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+	}
+
+	if (reset_phy)
+		tg3_phy_reset(tp);
+
+	err = tg3_chip_reset(tp);
+	if (err)
+		return err;
+
+	tg3_write_sig_legacy(tp, RESET_KIND_INIT);
+
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
+		val = tr32(TG3_CPMU_CTRL);
+		val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
+		tw32(TG3_CPMU_CTRL, val);
+
+		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+		val |= CPMU_LSPD_10MB_MACCLK_6_25;
+		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+
+		val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
+		val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
+		val |= CPMU_LNK_AWARE_MACCLK_6_25;
+		tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
+
+		val = tr32(TG3_CPMU_HST_ACC);
+		val &= ~CPMU_HST_ACC_MACCLK_MASK;
+		val |= CPMU_HST_ACC_MACCLK_6_25;
+		tw32(TG3_CPMU_HST_ACC, val);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+		val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
+		val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
+		       PCIE_PWR_MGMT_L1_THRESH_4MS;
+		tw32(PCIE_PWR_MGMT_THRESH, val);
+
+		val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
+		tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
+
+		tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
+
+		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
+		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
+	}
+
+	if (tg3_flag(tp, L1PLLPD_EN)) {
+		u32 grc_mode = tr32(GRC_MODE);
+
+		/* Access the lower 1K of PL PCIE block registers. */
+		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+		     val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+		tw32(GRC_MODE, grc_mode);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+			u32 grc_mode = tr32(GRC_MODE);
+
+			/* Access the lower 1K of PL PCIE block registers. */
+			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+			val = tr32(TG3_PCIE_TLDLPL_PORT +
+				   TG3_PCIE_PL_LO_PHYCTL5);
+			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+
+			tw32(GRC_MODE, grc_mode);
+		}
+
+		if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
+			u32 grc_mode = tr32(GRC_MODE);
+
+			/* Access the lower 1K of DL PCIE block registers. */
+			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+			tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
+
+			val = tr32(TG3_PCIE_TLDLPL_PORT +
+				   TG3_PCIE_DL_LO_FTSMAX);
+			val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
+			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
+			     val | TG3_PCIE_DL_LO_FTSMAX_VAL);
+
+			tw32(GRC_MODE, grc_mode);
+		}
+
+		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
+		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
+		val |= CPMU_LSPD_10MB_MACCLK_6_25;
+		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
+	}
+
+	/* This works around an issue with Athlon chipsets on
+	 * B3 tigon3 silicon.  This bit has no effect on any
+	 * other revision.  But do not set this on PCI Express
+	 * chips and don't even touch the clocks if the CPMU is present.
+	 */
+	if (!tg3_flag(tp, CPMU_PRESENT)) {
+		if (!tg3_flag(tp, PCI_EXPRESS))
+			tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
+		tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
+	}
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
+	    tg3_flag(tp, PCIX_MODE)) {
+		val = tr32(TG3PCI_PCISTATE);
+		val |= PCISTATE_RETRY_SAME_DMA;
+		tw32(TG3PCI_PCISTATE, val);
+	}
+
+	if (tg3_flag(tp, ENABLE_APE)) {
+		/* Allow reads and writes to the
+		 * APE register and memory space.
+		 */
+		val = tr32(TG3PCI_PCISTATE);
+		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+		       PCISTATE_ALLOW_APE_SHMEM_WR |
+		       PCISTATE_ALLOW_APE_PSPACE_WR;
+		tw32(TG3PCI_PCISTATE, val);
+	}
+
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
+		/* Enable some hw fixes.  */
+		val = tr32(TG3PCI_MSI_DATA);
+		val |= (1 << 26) | (1 << 28) | (1 << 29);
+		tw32(TG3PCI_MSI_DATA, val);
+	}
+
+	/* Descriptor ring init may access the NIC SRAM area to set
+	 * up the TX descriptors, so we can only do this after the
+	 * hardware has been successfully reset.
+	 */
+	err = tg3_init_rings(tp);
+	if (err)
+		return err;
+
+	if (tg3_flag(tp, 57765_PLUS)) {
+		val = tr32(TG3PCI_DMA_RW_CTRL) &
+		      ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
+			val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+			val |= DMA_RWCTRL_TAGGED_STAT_WA;
+		tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
+		/* This value is determined during the probe time DMA
+		 * engine test, tg3_test_dma.
+		 */
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
+
+	tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
+			  GRC_MODE_4X_NIC_SEND_RINGS |
+			  GRC_MODE_NO_TX_PHDR_CSUM |
+			  GRC_MODE_NO_RX_PHDR_CSUM);
+	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+
+	/* Pseudo-header checksum is done by hardware logic and not
+	 * the offload processors, so make the chip do the pseudo-
+	 * header checksums on receive.  For transmit it is more
+	 * convenient to do the pseudo-header checksum in software
+	 * as Linux does that on transmit for us in all cases.
+	 */
+	tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
+
+	tw32(GRC_MODE,
+	     tp->grc_mode |
+	     (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
+
+	/* Setup the timer prescaler register.  Clock is always 66 MHz. */
+	val = tr32(GRC_MISC_CFG);
+	val &= ~0xff;
+	val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
+	tw32(GRC_MISC_CFG, val);
+
+	/* Initialize MBUF/DESC pool. */
+	if (tg3_flag(tp, 5750_PLUS)) {
+		/* Do nothing.  */
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
+		tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
+		else
+			tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
+		tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
+		tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
+	} else if (tg3_flag(tp, TSO_CAPABLE)) {
+		int fw_len;
+
+		fw_len = tp->fw_len;
+		fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
+		tw32(BUFMGR_MB_POOL_ADDR,
+		     NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
+		tw32(BUFMGR_MB_POOL_SIZE,
+		     NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
+	}
+
+	if (tp->dev->mtu <= ETH_DATA_LEN) {
+		tw32(BUFMGR_MB_RDMA_LOW_WATER,
+		     tp->bufmgr_config.mbuf_read_dma_low_water);
+		tw32(BUFMGR_MB_MACRX_LOW_WATER,
+		     tp->bufmgr_config.mbuf_mac_rx_low_water);
+		tw32(BUFMGR_MB_HIGH_WATER,
+		     tp->bufmgr_config.mbuf_high_water);
+	} else {
+		tw32(BUFMGR_MB_RDMA_LOW_WATER,
+		     tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
+		tw32(BUFMGR_MB_MACRX_LOW_WATER,
+		     tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
+		tw32(BUFMGR_MB_HIGH_WATER,
+		     tp->bufmgr_config.mbuf_high_water_jumbo);
+	}
+	tw32(BUFMGR_DMA_LOW_WATER,
+	     tp->bufmgr_config.dma_low_water);
+	tw32(BUFMGR_DMA_HIGH_WATER,
+	     tp->bufmgr_config.dma_high_water);
+
+	val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+		val |= BUFMGR_MODE_NO_TX_UNDERRUN;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
+		val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
+	tw32(BUFMGR_MODE, val);
+	for (i = 0; i < 2000; i++) {
+		if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
+			break;
+		udelay(10);
+	}
+	if (i >= 2000) {
+		netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
+		return -ENODEV;
+	}
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
+		tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
+
+	tg3_setup_rxbd_thresholds(tp);
+
+	/* Initialize TG3_BDINFO's at:
+	 *  RCVDBDI_STD_BD:	standard eth size rx ring
+	 *  RCVDBDI_JUMBO_BD:	jumbo frame rx ring
+	 *  RCVDBDI_MINI_BD:	small frame rx ring (??? does not work)
+	 *
+	 * like so:
+	 *  TG3_BDINFO_HOST_ADDR:	high/low parts of DMA address of ring
+	 *  TG3_BDINFO_MAXLEN_FLAGS:	(rx max buffer size << 16) |
+	 *                              ring attribute flags
+	 *  TG3_BDINFO_NIC_ADDR:	location of descriptors in nic SRAM
+	 *
+	 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
+	 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
+	 *
+	 * The size of each ring is fixed in the firmware, but the location is
+	 * configurable.
+	 */
+	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+	     ((u64) tpr->rx_std_mapping >> 32));
+	tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+	     ((u64) tpr->rx_std_mapping & 0xffffffff));
+	if (!tg3_flag(tp, 5717_PLUS))
+		tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
+		     NIC_SRAM_RX_BUFFER_DESC);
+
+	/* Disable the mini ring */
+	if (!tg3_flag(tp, 5705_PLUS))
+		tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
+		     BDINFO_FLAGS_DISABLED);
+
+	/* Program the jumbo buffer descriptor ring control
+	 * blocks on those devices that have them.
+	 */
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+	    (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
+
+		if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+			     ((u64) tpr->rx_jmb_mapping >> 32));
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+			     ((u64) tpr->rx_jmb_mapping & 0xffffffff));
+			val = TG3_RX_JMB_RING_SIZE(tp) <<
+			      BDINFO_FLAGS_MAXLEN_SHIFT;
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+			     val | BDINFO_FLAGS_USE_EXT_RECV);
+			if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+				tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
+				     NIC_SRAM_RX_JUMBO_BUFFER_DESC);
+		} else {
+			tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
+			     BDINFO_FLAGS_DISABLED);
+		}
+
+		if (tg3_flag(tp, 57765_PLUS)) {
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+				val = TG3_RX_STD_MAX_SIZE_5700;
+			else
+				val = TG3_RX_STD_MAX_SIZE_5717;
+			val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
+			val |= (TG3_RX_STD_DMA_SZ << 2);
+		} else
+			val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
+	} else
+		val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
+
+	tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
+
+	tpr->rx_std_prod_idx = tp->rx_pending;
+	tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
+
+	tpr->rx_jmb_prod_idx =
+		tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
+	tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
+
+	tg3_rings_reset(tp);
+
+	/* Initialize MAC address and backoff seed. */
+	__tg3_set_mac_addr(tp, 0);
+
+	/* MTU + ethernet header + FCS + optional VLAN tag */
+	tw32(MAC_RX_MTU_SIZE,
+	     tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+
+	/* The slot time is changed by tg3_setup_phy if we
+	 * run at gigabit with half duplex.
+	 */
+	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
+	      (6 << TX_LENGTHS_IPG_SHIFT) |
+	      (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+		val |= tr32(MAC_TX_LENGTHS) &
+		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
+			TX_LENGTHS_CNT_DWN_VAL_MSK);
+
+	tw32(MAC_TX_LENGTHS, val);
+
+	/* Receive rules. */
+	tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
+	tw32(RCVLPC_CONFIG, 0x0181);
+
+	/* Calculate RDMAC_MODE setting early; we need it to determine
+	 * the RCVLPC_STATE_ENABLE mask.
+	 */
+	rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
+		      RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
+		      RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
+		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
+		      RDMAC_MODE_LNGREAD_ENAB);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+		rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
+			      RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
+			      RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+		if (tg3_flag(tp, TSO_CAPABLE) &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+			rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
+		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+			   !tg3_flag(tp, IS_5788)) {
+			rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+		}
+	}
+
+	if (tg3_flag(tp, PCI_EXPRESS))
+		rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+
+	if (tg3_flag(tp, HW_TSO_1) ||
+	    tg3_flag(tp, HW_TSO_2) ||
+	    tg3_flag(tp, HW_TSO_3))
+		rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
+
+	if (tg3_flag(tp, 57765_PLUS) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+		rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+		rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+	    tg3_flag(tp, 57765_PLUS)) {
+		val = tr32(TG3_RDMA_RSRVCTRL_REG);
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+			val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+				 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+				 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
+			       TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
+			       TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
+		}
+		tw32(TG3_RDMA_RSRVCTRL_REG,
+		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+		val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
+		tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
+		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
+		     TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
+	}
+
+	/* Receive/send statistics. */
+	if (tg3_flag(tp, 5750_PLUS)) {
+		val = tr32(RCVLPC_STATS_ENABLE);
+		val &= ~RCVLPC_STATSENAB_DACK_FIX;
+		tw32(RCVLPC_STATS_ENABLE, val);
+	} else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
+		   tg3_flag(tp, TSO_CAPABLE)) {
+		val = tr32(RCVLPC_STATS_ENABLE);
+		val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
+		tw32(RCVLPC_STATS_ENABLE, val);
+	} else {
+		tw32(RCVLPC_STATS_ENABLE, 0xffffff);
+	}
+	tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
+	tw32(SNDDATAI_STATSENAB, 0xffffff);
+	tw32(SNDDATAI_STATSCTRL,
+	     (SNDDATAI_SCTRL_ENABLE |
+	      SNDDATAI_SCTRL_FASTUPD));
+
+	/* Setup host coalescing engine. */
+	tw32(HOSTCC_MODE, 0);
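+	/* Wait up to ~20 ms for the coalescing engine to stop. */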
+	for (i = 0; i < 2000; i++) {
+		if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
+			break;
+		udelay(10);
+	}
+
+	__tg3_set_coalesce(tp, &tp->coal);
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		/* Status/statistics block address.  See tg3_timer,
+		 * the tg3_periodic_fetch_stats call there, and
+		 * tg3_get_stats to see how this works for 5705/5750 chips.
+		 */
+		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
+		     ((u64) tp->stats_mapping >> 32));
+		tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
+		     ((u64) tp->stats_mapping & 0xffffffff));
+		tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
+
+		tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
+
+		/* Clear statistics and status block memory areas */
+		for (i = NIC_SRAM_STATS_BLK;
+		     i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
+		     i += sizeof(u32)) {
+			tg3_write_mem(tp, i, 0);
+			udelay(40);
+		}
+	}
+
+	tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
+
+	tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
+	tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
+	if (!tg3_flag(tp, 5705_PLUS))
+		tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
+
+	if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+		tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
+		/* Reset to prevent intermittently losing the first rx packet */
+		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+		udelay(10);
+	}
+
+	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
+			MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
+			MAC_MODE_FHDE_ENABLE;
+	if (tg3_flag(tp, ENABLE_APE))
+		tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+	if (!tg3_flag(tp, 5705_PLUS) &&
+	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+		tp->mac_mode |= MAC_MODE_LINK_POLARITY;
+	tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
+	udelay(40);
+
+	/* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
+	 * If TG3_FLAG_IS_NIC is zero, we should read the
+	 * register to preserve the GPIO settings for LOMs. The GPIOs,
+	 * whether used as inputs or outputs, are set by boot code after
+	 * reset.
+	 */
+	if (!tg3_flag(tp, IS_NIC)) {
+		u32 gpio_mask;
+
+		gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
+			    GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
+			    GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+			gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
+				     GRC_LCLCTRL_GPIO_OUTPUT3;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+			gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+		tp->grc_local_ctrl &= ~gpio_mask;
+		tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
+
+		/* GPIO1 must be driven high for eeprom write protect */
+		if (tg3_flag(tp, EEPROM_WRITE_PROT))
+			tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+					       GRC_LCLCTRL_GPIO_OUTPUT1);
+	}
+	tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+	udelay(100);
+
+	if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+		val = tr32(MSGINT_MODE);
+		val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+		tw32(MSGINT_MODE, val);
+	}
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
+		udelay(40);
+	}
+
+	val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
+	       WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
+	       WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
+	       WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
+	       WDMAC_MODE_LNGREAD_ENAB);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+		if (tg3_flag(tp, TSO_CAPABLE) &&
+		    (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
+		     tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
+			/* nothing */
+		} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
+			   !tg3_flag(tp, IS_5788)) {
+			val |= WDMAC_MODE_RX_ACCEL;
+		}
+	}
+
+	/* Enable host coalescing bug fix */
+	if (tg3_flag(tp, 5755_PLUS))
+		val |= WDMAC_MODE_STATUS_TAG_FIX;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+		val |= WDMAC_MODE_BURST_ALL_DATA;
+
+	tw32_f(WDMAC_MODE, val);
+	udelay(40);
+
+	if (tg3_flag(tp, PCIX_MODE)) {
+		u16 pcix_cmd;
+
+		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				     &pcix_cmd);
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
+			pcix_cmd &= ~PCI_X_CMD_MAX_READ;
+			pcix_cmd |= PCI_X_CMD_READ_2K;
+		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+			pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
+			pcix_cmd |= PCI_X_CMD_READ_2K;
+		}
+		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
+				      pcix_cmd);
+	}
+
+	tw32_f(RDMAC_MODE, rdmac_mode);
+	udelay(40);
+
+	tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
+	if (!tg3_flag(tp, 5705_PLUS))
+		tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+		tw32(SNDDATAC_MODE,
+		     SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
+	else
+		tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
+
+	tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
+	tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
+	val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
+	if (tg3_flag(tp, LRG_PROD_RING_CAP))
+		val |= RCVDBDI_MODE_LRG_RING_SZ;
+	tw32(RCVDBDI_MODE, val);
+	tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
+	if (tg3_flag(tp, HW_TSO_1) ||
+	    tg3_flag(tp, HW_TSO_2) ||
+	    tg3_flag(tp, HW_TSO_3))
+		tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
+	val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
+	if (tg3_flag(tp, ENABLE_TSS))
+		val |= SNDBDI_MODE_MULTI_TXQ_EN;
+	tw32(SNDBDI_MODE, val);
+	tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+		err = tg3_load_5701_a0_firmware_fix(tp);
+		if (err)
+			return err;
+	}
+
+	if (tg3_flag(tp, TSO_CAPABLE)) {
+		err = tg3_load_tso_firmware(tp);
+		if (err)
+			return err;
+	}
+
+	tp->tx_mode = TX_MODE_ENABLE;
+
+	if (tg3_flag(tp, 5755_PLUS) ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+		val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
+		tp->tx_mode &= ~val;
+		tp->tx_mode |= tr32(MAC_TX_MODE) & val;
+	}
+
+	tw32_f(MAC_TX_MODE, tp->tx_mode);
+	udelay(100);
+
+	if (tg3_flag(tp, ENABLE_RSS)) {
+		int i = 0;
+		u32 reg = MAC_RSS_INDIR_TBL_0;
+
+		if (tp->irq_cnt == 2) {
+			for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
+				tw32(reg, 0x0);
+				reg += 4;
+			}
+		} else {
+			u32 val;
+
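+			/* Pack eight 4-bit ring indices into each 32-bit
+			 * indirection table register, distributing flows
+			 * round-robin across the irq_cnt - 1 RX vectors.
+			 */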
+			while (i < TG3_RSS_INDIR_TBL_SIZE) {
+				val = i % (tp->irq_cnt - 1);
+				i++;
+				for (; i % 8; i++) {
+					val <<= 4;
+					val |= (i % (tp->irq_cnt - 1));
+				}
+				tw32(reg, val);
+				reg += 4;
+			}
+		}
+
+		/* Setup the "secret" hash key. */
+		tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
+		tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
+		tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
+		tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
+		tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
+		tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
+		tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
+		tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
+		tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
+		tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
+	}
+
+	tp->rx_mode = RX_MODE_ENABLE;
+	if (tg3_flag(tp, 5755_PLUS))
+		tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+
+	if (tg3_flag(tp, ENABLE_RSS))
+		tp->rx_mode |= RX_MODE_RSS_ENABLE |
+			       RX_MODE_RSS_ITBL_HASH_BITS_7 |
+			       RX_MODE_RSS_IPV6_HASH_EN |
+			       RX_MODE_RSS_TCP_IPV6_HASH_EN |
+			       RX_MODE_RSS_IPV4_HASH_EN |
+			       RX_MODE_RSS_TCP_IPV4_HASH_EN;
+
+	tw32_f(MAC_RX_MODE, tp->rx_mode);
+	udelay(10);
+
+	tw32(MAC_LED_CTRL, tp->led_ctrl);
+
+	tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+		udelay(10);
+	}
+	tw32_f(MAC_RX_MODE, tp->rx_mode);
+	udelay(10);
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
+			!(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
+			/* Set drive transmission level to 1.2V  */
+			/* only if the signal pre-emphasis bit is not set  */
+			val = tr32(MAC_SERDES_CFG);
+			val &= 0xfffff000;
+			val |= 0x880;
+			tw32(MAC_SERDES_CFG, val);
+		}
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
+			tw32(MAC_SERDES_CFG, 0x616000);
+	}
+
+	/* Prevent chip from dropping frames when flow control
+	 * is enabled.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		val = 1;
+	else
+		val = 2;
+	tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
+	    (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+		/* Use hardware link auto-negotiation */
+		tg3_flag_set(tp, HW_AUTONEG);
+	}
+
+	if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+		u32 tmp;
+
+		tmp = tr32(SERDES_RX_CTRL);
+		tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
+		tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
+		tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
+		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+	}
+
+	if (!tg3_flag(tp, USE_PHYLIB)) {
+		if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
+			tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
+			tp->link_config.speed = tp->link_config.orig_speed;
+			tp->link_config.duplex = tp->link_config.orig_duplex;
+			tp->link_config.autoneg = tp->link_config.orig_autoneg;
+		}
+
+		err = tg3_setup_phy(tp, 0);
+		if (err)
+			return err;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+		    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
+			u32 tmp;
+
+			/* Clear CRC stats. */
+			if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
+				tg3_writephy(tp, MII_TG3_TEST1,
+					     tmp | MII_TG3_TEST1_CRC_EN);
+				tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
+			}
+		}
+	}
+
+	__tg3_set_rx_mode(tp->dev);
+
+	/* Initialize receive rules. */
+	tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
+	tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
+	tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
+	tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
+
+	if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
+		limit = 8;
+	else
+		limit = 16;
+	if (tg3_flag(tp, ENABLE_ASF))
+		limit -= 4;
+	switch (limit) {
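+	/* Each case intentionally falls through, so all receive rules
+	 * from limit - 1 down to 4 are cleared.
+	 */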
+	case 16:
+		tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
+	case 15:
+		tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
+	case 14:
+		tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
+	case 13:
+		tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
+	case 12:
+		tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
+	case 11:
+		tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
+	case 10:
+		tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
+	case 9:
+		tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
+	case 8:
+		tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
+	case 7:
+		tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
+	case 6:
+		tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
+	case 5:
+		tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
+	case 4:
+		/* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
+	case 3:
+		/* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
+	case 2:
+	case 1:
+
+	default:
+		break;
+	}
+
+	if (tg3_flag(tp, ENABLE_APE))
+		/* Write our heartbeat update interval to APE. */
+		tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
+				APE_HOST_HEARTBEAT_INT_DISABLE);
+
+	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
+
+	return 0;
+}
+
+/* Called at device open time to get the chip ready for
+ * packet processing.  Invoked with tp->lock held.
+ */
+static int tg3_init_hw(struct tg3 *tp, int reset_phy)
+{
+	tg3_switch_clocks(tp);
+
+	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+	return tg3_reset_hw(tp, reset_phy);
+}
+
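+/* Accumulate a 32-bit value read from a hardware counter register into
+ * a 64-bit (high/low) software statistic.  A carry into ->high is
+ * needed whenever the 32-bit addition wraps, which is detected by
+ * ->low going backwards: e.g. low = 0xfffffff0 plus __val = 0x20
+ * yields low = 0x10, and 0x10 < 0x20, so high is incremented.
+ */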
+#define TG3_STAT_ADD32(PSTAT, REG) \
+do {	u32 __val = tr32(REG); \
+	(PSTAT)->low += __val; \
+	if ((PSTAT)->low < __val) \
+		(PSTAT)->high += 1; \
+} while (0)
+
+static void tg3_periodic_fetch_stats(struct tg3 *tp)
+{
+	struct tg3_hw_stats *sp = tp->hw_stats;
+
+	if (!netif_carrier_ok(tp->dev))
+		return;
+
+	TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
+	TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
+	TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
+	TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
+	TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
+	TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
+	TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
+	TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
+	TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
+	TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
+	TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
+
+	TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
+	TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
+	TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
+	TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
+	TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
+	TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
+	TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
+	TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
+	TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
+	TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
+	TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
+	TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
+	TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
+	TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
+
+	TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
+		TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
+	} else {
+		u32 val = tr32(HOSTCC_FLOW_ATTN);
+		val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
+		if (val) {
+			tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
+			sp->rx_discards.low += val;
+			if (sp->rx_discards.low < val)
+				sp->rx_discards.high += 1;
+		}
+		sp->mbuf_lwm_thresh_hit = sp->rx_discards;
+	}
+	TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
+}
+
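+/* Detect and replay a lost MSI: if a vector has work pending but its
+ * consumer indices have not moved since the last timer tick, allow one
+ * grace tick and then rewrite the interrupt mailbox to re-trigger it.
+ */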
+static void tg3_chk_missed_msi(struct tg3 *tp)
+{
+	u32 i;
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		if (tg3_has_work(tnapi)) {
+			if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
+			    tnapi->last_tx_cons == tnapi->tx_cons) {
+				if (tnapi->chk_msi_cnt < 1) {
+					tnapi->chk_msi_cnt++;
+					return;
+				}
+				tw32_mailbox(tnapi->int_mbox,
+					     tnapi->last_tag << 24);
+			}
+		}
+		tnapi->chk_msi_cnt = 0;
+		tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
+		tnapi->last_tx_cons = tnapi->tx_cons;
+	}
+}
+
+static void tg3_timer(unsigned long __opaque)
+{
+	struct tg3 *tp = (struct tg3 *) __opaque;
+
+	if (tp->irq_sync)
+		goto restart_timer;
+
+	spin_lock(&tp->lock);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		tg3_chk_missed_msi(tp);
+
+	if (!tg3_flag(tp, TAGGED_STATUS)) {
+		/* All of this is needed because, with non-tagged IRQ
+		 * status, the mailbox/status_block protocol the chip
+		 * uses with the CPU is race prone.
+		 */
+		if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
+			tw32(GRC_LOCAL_CTRL,
+			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
+		} else {
+			tw32(HOSTCC_MODE, tp->coalesce_mode |
+			     HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
+		}
+
+		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+			tg3_flag_set(tp, RESTART_TIMER);
+			spin_unlock(&tp->lock);
+			schedule_work(&tp->reset_task);
+			return;
+		}
+	}
+
+	/* This part only runs once per second. */
+	if (!--tp->timer_counter) {
+		if (tg3_flag(tp, 5705_PLUS))
+			tg3_periodic_fetch_stats(tp);
+
+		if (tp->setlpicnt && !--tp->setlpicnt)
+			tg3_phy_eee_enable(tp);
+
+		if (tg3_flag(tp, USE_LINKCHG_REG)) {
+			u32 mac_stat;
+			int phy_event;
+
+			mac_stat = tr32(MAC_STATUS);
+
+			phy_event = 0;
+			if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
+				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
+					phy_event = 1;
+			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
+				phy_event = 1;
+
+			if (phy_event)
+				tg3_setup_phy(tp, 0);
+		} else if (tg3_flag(tp, POLL_SERDES)) {
+			u32 mac_stat = tr32(MAC_STATUS);
+			int need_setup = 0;
+
+			if (netif_carrier_ok(tp->dev) &&
+			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
+				need_setup = 1;
+			}
+			if (!netif_carrier_ok(tp->dev) &&
+			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
+					 MAC_STATUS_SIGNAL_DET))) {
+				need_setup = 1;
+			}
+			if (need_setup) {
+				if (!tp->serdes_counter) {
+					tw32_f(MAC_MODE,
+					     (tp->mac_mode &
+					      ~MAC_MODE_PORT_MODE_MASK));
+					udelay(40);
+					tw32_f(MAC_MODE, tp->mac_mode);
+					udelay(40);
+				}
+				tg3_setup_phy(tp, 0);
+			}
+		} else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
+			   tg3_flag(tp, 5780_CLASS)) {
+			tg3_serdes_parallel_detect(tp);
+		}
+
+		tp->timer_counter = tp->timer_multiplier;
+	}
+
+	/* Heartbeat is only sent once every 2 seconds.
+	 *
+	 * The heartbeat is to tell the ASF firmware that the host
+	 * driver is still alive.  In the event that the OS crashes,
+	 * ASF needs to reset the hardware to free up the FIFO space
+	 * that may be filled with rx packets destined for the host.
+	 * If the FIFO is full, ASF will no longer function properly.
+	 *
+	 * Unintended resets have been reported on real time kernels
+	 * where the timer doesn't run on time.  Netpoll will have the
+	 * same problem.
+	 *
+	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
+	 * to check the ring condition when the heartbeat is expiring
+	 * before doing the reset.  This will prevent most unintended
+	 * resets.
+	 */
+	if (!--tp->asf_counter) {
+		if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
+			tg3_wait_for_event_ack(tp);
+
+			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
+				      FWCMD_NICDRV_ALIVE3);
+			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
+				      TG3_FW_UPDATE_TIMEOUT_SEC);
+
+			tg3_generate_fw_event(tp);
+		}
+		tp->asf_counter = tp->asf_multiplier;
+	}
+
+	spin_unlock(&tp->lock);
+
+restart_timer:
+	tp->timer.expires = jiffies + tp->timer_offset;
+	add_timer(&tp->timer);
+}
+
+static int tg3_request_irq(struct tg3 *tp, int irq_num)
+{
+	irq_handler_t fn;
+	unsigned long flags;
+	char *name;
+	struct tg3_napi *tnapi = &tp->napi[irq_num];
+
+	if (tp->irq_cnt == 1)
+		name = tp->dev->name;
+	else {
+		name = &tnapi->irq_lbl[0];
+		snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
+		name[IFNAMSIZ-1] = 0;
+	}
+
+	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+		fn = tg3_msi;
+		if (tg3_flag(tp, 1SHOT_MSI))
+			fn = tg3_msi_1shot;
+		flags = 0;
+	} else {
+		fn = tg3_interrupt;
+		if (tg3_flag(tp, TAGGED_STATUS))
+			fn = tg3_interrupt_tagged;
+		flags = IRQF_SHARED;
+	}
+
+	return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
+}
+
+static int tg3_test_interrupt(struct tg3 *tp)
+{
+	struct tg3_napi *tnapi = &tp->napi[0];
+	struct net_device *dev = tp->dev;
+	int err, i, intr_ok = 0;
+	u32 val;
+
+	if (!netif_running(dev))
+		return -ENODEV;
+
+	tg3_disable_ints(tp);
+
+	free_irq(tnapi->irq_vec, tnapi);
+
+	/*
+	 * Turn off MSI one-shot mode.  Otherwise this test has no
+	 * way to observe whether the interrupt was delivered.
+	 */
+	if (tg3_flag(tp, 57765_PLUS)) {
+		val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
+		tw32(MSGINT_MODE, val);
+	}
+
+	err = request_irq(tnapi->irq_vec, tg3_test_isr,
+			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
+	if (err)
+		return err;
+
+	tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
+	tg3_enable_ints(tp);
+
+	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+	       tnapi->coal_now);
+
+	for (i = 0; i < 5; i++) {
+		u32 int_mbox, misc_host_ctrl;
+
+		int_mbox = tr32_mailbox(tnapi->int_mbox);
+		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
+
+		if ((int_mbox != 0) ||
+		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
+			intr_ok = 1;
+			break;
+		}
+
+		if (tg3_flag(tp, 57765_PLUS) &&
+		    tnapi->hw_status->status_tag != tnapi->last_tag)
+			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+
+		msleep(10);
+	}
+
+	tg3_disable_ints(tp);
+
+	free_irq(tnapi->irq_vec, tnapi);
+
+	err = tg3_request_irq(tp, 0);
+
+	if (err)
+		return err;
+
+	if (intr_ok) {
+		/* Reenable MSI one shot mode. */
+		if (tg3_flag(tp, 57765_PLUS)) {
+			val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
+			tw32(MSGINT_MODE, val);
+		}
+		return 0;
+	}
+
+	return -EIO;
+}
+
+/* Returns 0 if the MSI test succeeds, or if the MSI test fails but
+ * INTx mode is successfully restored.
+ */
+static int tg3_test_msi(struct tg3 *tp)
+{
+	int err;
+	u16 pci_cmd;
+
+	if (!tg3_flag(tp, USING_MSI))
+		return 0;
+
+	/* Turn off SERR reporting in case MSI terminates with Master
+	 * Abort.
+	 */
+	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+	pci_write_config_word(tp->pdev, PCI_COMMAND,
+			      pci_cmd & ~PCI_COMMAND_SERR);
+
+	err = tg3_test_interrupt(tp);
+
+	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+	if (!err)
+		return 0;
+
+	/* other failures */
+	if (err != -EIO)
+		return err;
+
+	/* MSI test failed, go back to INTx mode */
+	netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
+		    "to INTx mode. Please report this failure to the PCI "
+		    "maintainer and include system chipset information\n");
+
+	free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+	pci_disable_msi(tp->pdev);
+
+	tg3_flag_clear(tp, USING_MSI);
+	tp->napi[0].irq_vec = tp->pdev->irq;
+
+	err = tg3_request_irq(tp, 0);
+	if (err)
+		return err;
+
+	/* Need to reset the chip because the MSI cycle may have terminated
+	 * with Master Abort.
+	 */
+	tg3_full_lock(tp, 1);
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+	err = tg3_init_hw(tp, 1);
+
+	tg3_full_unlock(tp);
+
+	if (err)
+		free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
+
+	return err;
+}
+
+static int tg3_request_firmware(struct tg3 *tp)
+{
+	const __be32 *fw_data;
+
+	if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
+		netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+			   tp->fw_needed);
+		return -ENOENT;
+	}
+
+	fw_data = (void *)tp->fw->data;
+
+	/* Firmware blob starts with version numbers, followed by
+	 * start address and _full_ length including BSS sections
+	 * (which must be longer than the actual data, of course).
+	 */
+
+	tp->fw_len = be32_to_cpu(fw_data[2]);	/* includes bss */
+	if (tp->fw_len < (tp->fw->size - 12)) {
+		netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+			   tp->fw_len, tp->fw_needed);
+		release_firmware(tp->fw);
+		tp->fw = NULL;
+		return -EINVAL;
+	}
+
+	/* We no longer need firmware; we have it. */
+	tp->fw_needed = NULL;
+	return 0;
+}
+
+static bool tg3_enable_msix(struct tg3 *tp)
+{
+	int i, rc, cpus = num_online_cpus();
+	struct msix_entry msix_ent[tp->irq_max];
+
+	if (cpus == 1)
+		/* Just fall back to the simpler MSI mode. */
+		return false;
+
+	/*
+	 * We want as many rx rings enabled as there are cpus.
+	 * The first MSIX vector only deals with link interrupts, etc,
+	 * so we add one to the number of vectors we are requesting.
+	 */
+	tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+
+	for (i = 0; i < tp->irq_max; i++) {
+		msix_ent[i].entry  = i;
+		msix_ent[i].vector = 0;
+	}
+
+	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+	if (rc < 0) {
+		return false;
+	} else if (rc != 0) {
+		if (pci_enable_msix(tp->pdev, msix_ent, rc))
+			return false;
+		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+			      tp->irq_cnt, rc);
+		tp->irq_cnt = rc;
+	}
+
+	for (i = 0; i < tp->irq_max; i++)
+		tp->napi[i].irq_vec = msix_ent[i].vector;
+
+	netif_set_real_num_tx_queues(tp->dev, 1);
+	rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
+	if (netif_set_real_num_rx_queues(tp->dev, rc)) {
+		pci_disable_msix(tp->pdev);
+		return false;
+	}
+
+	if (tp->irq_cnt > 1) {
+		tg3_flag_set(tp, ENABLE_RSS);
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
+			tg3_flag_set(tp, ENABLE_TSS);
+			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+		}
+	}
+
+	return true;
+}
+
+static void tg3_ints_init(struct tg3 *tp)
+{
+	if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
+	    !tg3_flag(tp, TAGGED_STATUS)) {
+		/* All MSI supporting chips should support tagged
+		 * status.  Warn and fall back to INTx if that is
+		 * not the case.
+		 */
+		netdev_warn(tp->dev,
+			    "MSI without TAGGED_STATUS? Not using MSI\n");
+		goto defcfg;
+	}
+
+	if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
+		tg3_flag_set(tp, USING_MSIX);
+	else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
+		tg3_flag_set(tp, USING_MSI);
+
+	if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
+		u32 msi_mode = tr32(MSGINT_MODE);
+		if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
+			msi_mode |= MSGINT_MODE_MULTIVEC_EN;
+		tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
+	}
+defcfg:
+	if (!tg3_flag(tp, USING_MSIX)) {
+		tp->irq_cnt = 1;
+		tp->napi[0].irq_vec = tp->pdev->irq;
+		netif_set_real_num_tx_queues(tp->dev, 1);
+		netif_set_real_num_rx_queues(tp->dev, 1);
+	}
+}
+
+static void tg3_ints_fini(struct tg3 *tp)
+{
+	if (tg3_flag(tp, USING_MSIX))
+		pci_disable_msix(tp->pdev);
+	else if (tg3_flag(tp, USING_MSI))
+		pci_disable_msi(tp->pdev);
+	tg3_flag_clear(tp, USING_MSI);
+	tg3_flag_clear(tp, USING_MSIX);
+	tg3_flag_clear(tp, ENABLE_RSS);
+	tg3_flag_clear(tp, ENABLE_TSS);
+}
+
+static int tg3_open(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int i, err;
+
+	if (tp->fw_needed) {
+		err = tg3_request_firmware(tp);
+		if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
+			if (err)
+				return err;
+		} else if (err) {
+			netdev_warn(tp->dev, "TSO capability disabled\n");
+			tg3_flag_clear(tp, TSO_CAPABLE);
+		} else if (!tg3_flag(tp, TSO_CAPABLE)) {
+			netdev_notice(tp->dev, "TSO capability restored\n");
+			tg3_flag_set(tp, TSO_CAPABLE);
+		}
+	}
+
+	netif_carrier_off(tp->dev);
+
+	err = tg3_power_up(tp);
+	if (err)
+		return err;
+
+	tg3_full_lock(tp, 0);
+
+	tg3_disable_ints(tp);
+	tg3_flag_clear(tp, INIT_COMPLETE);
+
+	tg3_full_unlock(tp);
+
+	/*
+	 * Set up interrupts first so we know how
+	 * many NAPI resources to allocate.
+	 */
+	tg3_ints_init(tp);
+
+	/* The placement of this call is tied
+	 * to the setup and use of Host TX descriptors.
+	 */
+	err = tg3_alloc_consistent(tp);
+	if (err)
+		goto err_out1;
+
+	tg3_napi_init(tp);
+
+	tg3_napi_enable(tp);
+
+	for (i = 0; i < tp->irq_cnt; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		err = tg3_request_irq(tp, i);
+		if (err) {
+			/* Unwind: free the vectors requested so far. */
+			for (i--; i >= 0; i--) {
+				tnapi = &tp->napi[i];
+				free_irq(tnapi->irq_vec, tnapi);
+			}
+			break;
+		}
+	}
+
+	if (err)
+		goto err_out2;
+
+	tg3_full_lock(tp, 0);
+
+	err = tg3_init_hw(tp, 1);
+	if (err) {
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		tg3_free_rings(tp);
+	} else {
+		if (tg3_flag(tp, TAGGED_STATUS) &&
+			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+			GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+			tp->timer_offset = HZ;
+		else
+			tp->timer_offset = HZ / 10;
+
+		BUG_ON(tp->timer_offset > HZ);
+		tp->timer_counter = tp->timer_multiplier =
+			(HZ / tp->timer_offset);
+		tp->asf_counter = tp->asf_multiplier =
+			((HZ / tp->timer_offset) * 2);
+
+		init_timer(&tp->timer);
+		tp->timer.expires = jiffies + tp->timer_offset;
+		tp->timer.data = (unsigned long) tp;
+		tp->timer.function = tg3_timer;
+	}
+
+	tg3_full_unlock(tp);
+
+	if (err)
+		goto err_out3;
+
+	if (tg3_flag(tp, USING_MSI)) {
+		err = tg3_test_msi(tp);
+
+		if (err) {
+			tg3_full_lock(tp, 0);
+			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+			tg3_free_rings(tp);
+			tg3_full_unlock(tp);
+
+			goto err_out2;
+		}
+
+		if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
+			u32 val = tr32(PCIE_TRANSACTION_CFG);
+
+			tw32(PCIE_TRANSACTION_CFG,
+			     val | PCIE_TRANS_CFG_1SHOT_MSI);
+		}
+	}
+
+	tg3_phy_start(tp);
+
+	tg3_full_lock(tp, 0);
+
+	add_timer(&tp->timer);
+	tg3_flag_set(tp, INIT_COMPLETE);
+	tg3_enable_ints(tp);
+
+	tg3_full_unlock(tp);
+
+	netif_tx_start_all_queues(dev);
+
+	/*
+	 * Reset the loopback feature if it was turned on while the device
+	 * was down, to make sure that it is installed properly now.
+	 */
+	if (dev->features & NETIF_F_LOOPBACK)
+		tg3_set_loopback(dev, dev->features);
+
+	return 0;
+
+err_out3:
+	for (i = tp->irq_cnt - 1; i >= 0; i--) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		free_irq(tnapi->irq_vec, tnapi);
+	}
+
+err_out2:
+	tg3_napi_disable(tp);
+	tg3_napi_fini(tp);
+	tg3_free_consistent(tp);
+
+err_out1:
+	tg3_ints_fini(tp);
+	tg3_frob_aux_power(tp, false);
+	pci_set_power_state(tp->pdev, PCI_D3hot);
+	return err;
+}
+
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
+						 struct rtnl_link_stats64 *);
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
+
+static int tg3_close(struct net_device *dev)
+{
+	int i;
+	struct tg3 *tp = netdev_priv(dev);
+
+	tg3_napi_disable(tp);
+	cancel_work_sync(&tp->reset_task);
+
+	netif_tx_stop_all_queues(dev);
+
+	del_timer_sync(&tp->timer);
+
+	tg3_phy_stop(tp);
+
+	tg3_full_lock(tp, 1);
+
+	tg3_disable_ints(tp);
+
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+	tg3_free_rings(tp);
+	tg3_flag_clear(tp, INIT_COMPLETE);
+
+	tg3_full_unlock(tp);
+
+	for (i = tp->irq_cnt - 1; i >= 0; i--) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+		free_irq(tnapi->irq_vec, tnapi);
+	}
+
+	tg3_ints_fini(tp);
+
+	tg3_get_stats64(tp->dev, &tp->net_stats_prev);
+
+	memcpy(&tp->estats_prev, tg3_get_estats(tp),
+	       sizeof(tp->estats_prev));
+
+	tg3_napi_fini(tp);
+
+	tg3_free_consistent(tp);
+
+	tg3_power_down(tp);
+
+	netif_carrier_off(tp->dev);
+
+	return 0;
+}
+
+static inline u64 get_stat64(tg3_stat64_t *val)
+{
+	return ((u64)val->high << 32) | ((u64)val->low);
+}
+
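+/* On 5700/5701 with a copper PHY, the FCS error count is kept by the
+ * PHY's MII_TG3_TEST1 CRC counter rather than by the MAC statistics
+ * block, so read and accumulate it over MII; other chips use the MAC
+ * rx_fcs_errors counter directly.
+ */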
+static u64 calc_crc_errors(struct tg3 *tp)
+{
+	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
+		u32 val;
+
+		spin_lock_bh(&tp->lock);
+		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
+			tg3_writephy(tp, MII_TG3_TEST1,
+				     val | MII_TG3_TEST1_CRC_EN);
+			tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
+		} else
+			val = 0;
+		spin_unlock_bh(&tp->lock);
+
+		tp->phy_crc_errors += val;
+
+		return tp->phy_crc_errors;
+	}
+
+	return get_stat64(&hw_stats->rx_fcs_errors);
+}
+
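+/* Each ethtool statistic is the snapshot saved at the last close
+ * (estats_prev) plus whatever the hardware has accumulated since the
+ * last counter reset.
+ */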
+#define ESTAT_ADD(member) \
+	estats->member =	old_estats->member + \
+				get_stat64(&hw_stats->member)
+
+static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+{
+	struct tg3_ethtool_stats *estats = &tp->estats;
+	struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
+	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+	if (!hw_stats)
+		return old_estats;
+
+	ESTAT_ADD(rx_octets);
+	ESTAT_ADD(rx_fragments);
+	ESTAT_ADD(rx_ucast_packets);
+	ESTAT_ADD(rx_mcast_packets);
+	ESTAT_ADD(rx_bcast_packets);
+	ESTAT_ADD(rx_fcs_errors);
+	ESTAT_ADD(rx_align_errors);
+	ESTAT_ADD(rx_xon_pause_rcvd);
+	ESTAT_ADD(rx_xoff_pause_rcvd);
+	ESTAT_ADD(rx_mac_ctrl_rcvd);
+	ESTAT_ADD(rx_xoff_entered);
+	ESTAT_ADD(rx_frame_too_long_errors);
+	ESTAT_ADD(rx_jabbers);
+	ESTAT_ADD(rx_undersize_packets);
+	ESTAT_ADD(rx_in_length_errors);
+	ESTAT_ADD(rx_out_length_errors);
+	ESTAT_ADD(rx_64_or_less_octet_packets);
+	ESTAT_ADD(rx_65_to_127_octet_packets);
+	ESTAT_ADD(rx_128_to_255_octet_packets);
+	ESTAT_ADD(rx_256_to_511_octet_packets);
+	ESTAT_ADD(rx_512_to_1023_octet_packets);
+	ESTAT_ADD(rx_1024_to_1522_octet_packets);
+	ESTAT_ADD(rx_1523_to_2047_octet_packets);
+	ESTAT_ADD(rx_2048_to_4095_octet_packets);
+	ESTAT_ADD(rx_4096_to_8191_octet_packets);
+	ESTAT_ADD(rx_8192_to_9022_octet_packets);
+
+	ESTAT_ADD(tx_octets);
+	ESTAT_ADD(tx_collisions);
+	ESTAT_ADD(tx_xon_sent);
+	ESTAT_ADD(tx_xoff_sent);
+	ESTAT_ADD(tx_flow_control);
+	ESTAT_ADD(tx_mac_errors);
+	ESTAT_ADD(tx_single_collisions);
+	ESTAT_ADD(tx_mult_collisions);
+	ESTAT_ADD(tx_deferred);
+	ESTAT_ADD(tx_excessive_collisions);
+	ESTAT_ADD(tx_late_collisions);
+	ESTAT_ADD(tx_collide_2times);
+	ESTAT_ADD(tx_collide_3times);
+	ESTAT_ADD(tx_collide_4times);
+	ESTAT_ADD(tx_collide_5times);
+	ESTAT_ADD(tx_collide_6times);
+	ESTAT_ADD(tx_collide_7times);
+	ESTAT_ADD(tx_collide_8times);
+	ESTAT_ADD(tx_collide_9times);
+	ESTAT_ADD(tx_collide_10times);
+	ESTAT_ADD(tx_collide_11times);
+	ESTAT_ADD(tx_collide_12times);
+	ESTAT_ADD(tx_collide_13times);
+	ESTAT_ADD(tx_collide_14times);
+	ESTAT_ADD(tx_collide_15times);
+	ESTAT_ADD(tx_ucast_packets);
+	ESTAT_ADD(tx_mcast_packets);
+	ESTAT_ADD(tx_bcast_packets);
+	ESTAT_ADD(tx_carrier_sense_errors);
+	ESTAT_ADD(tx_discards);
+	ESTAT_ADD(tx_errors);
+
+	ESTAT_ADD(dma_writeq_full);
+	ESTAT_ADD(dma_write_prioq_full);
+	ESTAT_ADD(rxbds_empty);
+	ESTAT_ADD(rx_discards);
+	ESTAT_ADD(rx_errors);
+	ESTAT_ADD(rx_threshold_hit);
+
+	ESTAT_ADD(dma_readq_full);
+	ESTAT_ADD(dma_read_prioq_full);
+	ESTAT_ADD(tx_comp_queue_full);
+
+	ESTAT_ADD(ring_set_send_prod_index);
+	ESTAT_ADD(ring_status_update);
+	ESTAT_ADD(nic_irqs);
+	ESTAT_ADD(nic_avoided_irqs);
+	ESTAT_ADD(nic_tx_threshold_hit);
+
+	ESTAT_ADD(mbuf_lwm_thresh_hit);
+
+	return estats;
+}
+
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
+	struct tg3_hw_stats *hw_stats = tp->hw_stats;
+
+	if (!hw_stats)
+		return old_stats;
+
+	stats->rx_packets = old_stats->rx_packets +
+		get_stat64(&hw_stats->rx_ucast_packets) +
+		get_stat64(&hw_stats->rx_mcast_packets) +
+		get_stat64(&hw_stats->rx_bcast_packets);
+
+	stats->tx_packets = old_stats->tx_packets +
+		get_stat64(&hw_stats->tx_ucast_packets) +
+		get_stat64(&hw_stats->tx_mcast_packets) +
+		get_stat64(&hw_stats->tx_bcast_packets);
+
+	stats->rx_bytes = old_stats->rx_bytes +
+		get_stat64(&hw_stats->rx_octets);
+	stats->tx_bytes = old_stats->tx_bytes +
+		get_stat64(&hw_stats->tx_octets);
+
+	stats->rx_errors = old_stats->rx_errors +
+		get_stat64(&hw_stats->rx_errors);
+	stats->tx_errors = old_stats->tx_errors +
+		get_stat64(&hw_stats->tx_errors) +
+		get_stat64(&hw_stats->tx_mac_errors) +
+		get_stat64(&hw_stats->tx_carrier_sense_errors) +
+		get_stat64(&hw_stats->tx_discards);
+
+	stats->multicast = old_stats->multicast +
+		get_stat64(&hw_stats->rx_mcast_packets);
+	stats->collisions = old_stats->collisions +
+		get_stat64(&hw_stats->tx_collisions);
+
+	stats->rx_length_errors = old_stats->rx_length_errors +
+		get_stat64(&hw_stats->rx_frame_too_long_errors) +
+		get_stat64(&hw_stats->rx_undersize_packets);
+
+	stats->rx_over_errors = old_stats->rx_over_errors +
+		get_stat64(&hw_stats->rxbds_empty);
+	stats->rx_frame_errors = old_stats->rx_frame_errors +
+		get_stat64(&hw_stats->rx_align_errors);
+	stats->tx_aborted_errors = old_stats->tx_aborted_errors +
+		get_stat64(&hw_stats->tx_discards);
+	stats->tx_carrier_errors = old_stats->tx_carrier_errors +
+		get_stat64(&hw_stats->tx_carrier_sense_errors);
+
+	stats->rx_crc_errors = old_stats->rx_crc_errors +
+		calc_crc_errors(tp);
+
+	stats->rx_missed_errors = old_stats->rx_missed_errors +
+		get_stat64(&hw_stats->rx_discards);
+
+	stats->rx_dropped = tp->rx_dropped;
+
+	return stats;
+}
+
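+/* Bitwise, LSB-first CRC-32 over buf (Ethernet polynomial, reflected
+ * form 0xedb88320, initial value 0xffffffff, final inversion), used to
+ * hash multicast addresses for the MAC hash filter.
+ */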
+static inline u32 calc_crc(unsigned char *buf, int len)
+{
+	u32 reg;
+	u32 tmp;
+	int j, k;
+
+	reg = 0xffffffff;
+
+	for (j = 0; j < len; j++) {
+		reg ^= buf[j];
+
+		for (k = 0; k < 8; k++) {
+			tmp = reg & 0x01;
+
+			reg >>= 1;
+
+			if (tmp)
+				reg ^= 0xedb88320;
+		}
+	}
+
+	return ~reg;
+}
+
+static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
+{
+	/* accept or reject all multicast frames */
+	tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
+	tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
+	tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
+	tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
+}
+
+static void __tg3_set_rx_mode(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 rx_mode;
+
+	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
+				  RX_MODE_KEEP_VLAN_TAG);
+
+#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
+	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
+	 * flag clear.
+	 */
+	if (!tg3_flag(tp, ENABLE_ASF))
+		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
+#endif
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Promiscuous mode. */
+		rx_mode |= RX_MODE_PROMISC;
+	} else if (dev->flags & IFF_ALLMULTI) {
+		/* Accept all multicast. */
+		tg3_set_multi(tp, 1);
+	} else if (netdev_mc_empty(dev)) {
+		/* Reject all multicast. */
+		tg3_set_multi(tp, 0);
+	} else {
+		/* Accept one or more multicast addresses. */
+		struct netdev_hw_addr *ha;
+		u32 mc_filter[4] = { 0, };
+		u32 regidx;
+		u32 bit;
+		u32 crc;
+
+		netdev_for_each_mc_addr(ha, dev) {
+			crc = calc_crc(ha->addr, ETH_ALEN);
+			/* The low 7 bits of the inverted CRC select one
+			 * of 128 hash-filter bits: bits 6:5 pick one of
+			 * the four 32-bit hash registers, bits 4:0 the
+			 * bit within it.
+			 */
+			bit = ~crc & 0x7f;
+			regidx = (bit & 0x60) >> 5;
+			bit &= 0x1f;
+			mc_filter[regidx] |= (1 << bit);
+		}
+
+		tw32(MAC_HASH_REG_0, mc_filter[0]);
+		tw32(MAC_HASH_REG_1, mc_filter[1]);
+		tw32(MAC_HASH_REG_2, mc_filter[2]);
+		tw32(MAC_HASH_REG_3, mc_filter[3]);
+	}
+
+	if (rx_mode != tp->rx_mode) {
+		tp->rx_mode = rx_mode;
+		tw32_f(MAC_RX_MODE, rx_mode);
+		udelay(10);
+	}
+}
+
+static void tg3_set_rx_mode(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return;
+
+	tg3_full_lock(tp, 0);
+	__tg3_set_rx_mode(dev);
+	tg3_full_unlock(tp);
+}
+
+static int tg3_get_regs_len(struct net_device *dev)
+{
+	return TG3_REG_BLK_SIZE;
+}
+
+static void tg3_get_regs(struct net_device *dev,
+		struct ethtool_regs *regs, void *_p)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	regs->version = 0;
+
+	memset(_p, 0, TG3_REG_BLK_SIZE);
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		return;
+
+	tg3_full_lock(tp, 0);
+
+	tg3_dump_legacy_regs(tp, (u32 *)_p);
+
+	tg3_full_unlock(tp);
+}
+
+static int tg3_get_eeprom_len(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	return tp->nvram_size;
+}
+
+static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int ret;
+	u8  *pd;
+	u32 i, offset, len, b_offset, b_count;
+	__be32 val;
+
+	if (tg3_flag(tp, NO_NVRAM))
+		return -EINVAL;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		return -EAGAIN;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+	eeprom->len = 0;
+
+	eeprom->magic = TG3_EEPROM_MAGIC;
+
+	if (offset & 3) {
+		/* adjustments to start on required 4 byte boundary */
+		b_offset = offset & 3;
+		b_count = 4 - b_offset;
+		if (b_count > len) {
+			/* i.e. offset=1 len=2 */
+			b_count = len;
+		}
+		ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
+		if (ret)
+			return ret;
+		memcpy(data, ((char *)&val) + b_offset, b_count);
+		len -= b_count;
+		offset += b_count;
+		eeprom->len += b_count;
+	}
+
+	/* read bytes up to the last 4 byte boundary */
+	pd = &data[eeprom->len];
+	for (i = 0; i < (len - (len & 3)); i += 4) {
+		ret = tg3_nvram_read_be32(tp, offset + i, &val);
+		if (ret) {
+			eeprom->len += i;
+			return ret;
+		}
+		memcpy(pd + i, &val, 4);
+	}
+	eeprom->len += i;
+
+	if (len & 3) {
+		/* read last bytes not ending on 4 byte boundary */
+		pd = &data[eeprom->len];
+		b_count = len & 3;
+		b_offset = offset + len - b_count;
+		ret = tg3_nvram_read_be32(tp, b_offset, &val);
+		if (ret)
+			return ret;
+		memcpy(pd, &val, b_count);
+		eeprom->len += b_count;
+	}
+	return 0;
+}
+
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
+
+static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int ret;
+	u32 offset, len, b_offset, odd_len;
+	u8 *buf;
+	__be32 start, end;
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		return -EAGAIN;
+
+	if (tg3_flag(tp, NO_NVRAM) ||
+	    eeprom->magic != TG3_EEPROM_MAGIC)
+		return -EINVAL;
+
+	offset = eeprom->offset;
+	len = eeprom->len;
+
+	if ((b_offset = (offset & 3))) {
+		/* adjustments to start on required 4 byte boundary */
+		ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
+		if (ret)
+			return ret;
+		len += b_offset;
+		offset &= ~3;
+		if (len < 4)
+			len = 4;
+	}
+
+	odd_len = 0;
+	if (len & 3) {
+		/* adjustments to end on required 4 byte boundary */
+		odd_len = 1;
+		len = (len + 3) & ~3;
+		ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
+		if (ret)
+			return ret;
+	}
+
+	buf = data;
+	if (b_offset || odd_len) {
+		buf = kmalloc(len, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		if (b_offset)
+			memcpy(buf, &start, 4);
+		if (odd_len)
+			memcpy(buf+len-4, &end, 4);
+		memcpy(buf + b_offset, data, eeprom->len);
+	}
+
+	ret = tg3_nvram_write_block(tp, offset, len, buf);
+
+	if (buf != data)
+		kfree(buf);
+
+	return ret;
+}
+
+static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		struct phy_device *phydev;
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_gset(phydev, cmd);
+	}
+
+	cmd->supported = (SUPPORTED_Autoneg);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+		cmd->supported |= (SUPPORTED_1000baseT_Half |
+				   SUPPORTED_1000baseT_Full);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+		cmd->supported |= (SUPPORTED_100baseT_Half |
+				  SUPPORTED_100baseT_Full |
+				  SUPPORTED_10baseT_Half |
+				  SUPPORTED_10baseT_Full |
+				  SUPPORTED_TP);
+		cmd->port = PORT_TP;
+	} else {
+		cmd->supported |= SUPPORTED_FIBRE;
+		cmd->port = PORT_FIBRE;
+	}
+
+	cmd->advertising = tp->link_config.advertising;
+	if (tg3_flag(tp, PAUSE_AUTONEG)) {
+		if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
+			if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+				cmd->advertising |= ADVERTISED_Pause;
+			} else {
+				cmd->advertising |= ADVERTISED_Pause |
+						    ADVERTISED_Asym_Pause;
+			}
+		} else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
+			cmd->advertising |= ADVERTISED_Asym_Pause;
+		}
+	}
+	if (netif_running(dev)) {
+		ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
+		cmd->duplex = tp->link_config.active_duplex;
+	} else {
+		ethtool_cmd_speed_set(cmd, SPEED_INVALID);
+		cmd->duplex = DUPLEX_INVALID;
+	}
+	cmd->phy_address = tp->phy_addr;
+	cmd->transceiver = XCVR_INTERNAL;
+	cmd->autoneg = tp->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 speed = ethtool_cmd_speed(cmd);
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		struct phy_device *phydev;
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_ethtool_sset(phydev, cmd);
+	}
+
+	if (cmd->autoneg != AUTONEG_ENABLE &&
+	    cmd->autoneg != AUTONEG_DISABLE)
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_DISABLE &&
+	    cmd->duplex != DUPLEX_FULL &&
+	    cmd->duplex != DUPLEX_HALF)
+		return -EINVAL;
+
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		u32 mask = ADVERTISED_Autoneg |
+			   ADVERTISED_Pause |
+			   ADVERTISED_Asym_Pause;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+			mask |= ADVERTISED_1000baseT_Half |
+				ADVERTISED_1000baseT_Full;
+
+		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+			mask |= ADVERTISED_100baseT_Half |
+				ADVERTISED_100baseT_Full |
+				ADVERTISED_10baseT_Half |
+				ADVERTISED_10baseT_Full |
+				ADVERTISED_TP;
+		else
+			mask |= ADVERTISED_FIBRE;
+
+		if (cmd->advertising & ~mask)
+			return -EINVAL;
+
+		mask &= (ADVERTISED_1000baseT_Half |
+			 ADVERTISED_1000baseT_Full |
+			 ADVERTISED_100baseT_Half |
+			 ADVERTISED_100baseT_Full |
+			 ADVERTISED_10baseT_Half |
+			 ADVERTISED_10baseT_Full);
+
+		cmd->advertising &= mask;
+	} else {
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
+			if (speed != SPEED_1000)
+				return -EINVAL;
+
+			if (cmd->duplex != DUPLEX_FULL)
+				return -EINVAL;
+		} else {
+			if (speed != SPEED_100 &&
+			    speed != SPEED_10)
+				return -EINVAL;
+		}
+	}
+
+	tg3_full_lock(tp, 0);
+
+	tp->link_config.autoneg = cmd->autoneg;
+	if (cmd->autoneg == AUTONEG_ENABLE) {
+		tp->link_config.advertising = (cmd->advertising |
+					      ADVERTISED_Autoneg);
+		tp->link_config.speed = SPEED_INVALID;
+		tp->link_config.duplex = DUPLEX_INVALID;
+	} else {
+		tp->link_config.advertising = 0;
+		tp->link_config.speed = speed;
+		tp->link_config.duplex = cmd->duplex;
+	}
+
+	tp->link_config.orig_speed = tp->link_config.speed;
+	tp->link_config.orig_duplex = tp->link_config.duplex;
+	tp->link_config.orig_autoneg = tp->link_config.autoneg;
+
+	if (netif_running(dev))
+		tg3_setup_phy(tp, 1);
+
+	tg3_full_unlock(tp);
+
+	return 0;
+}
+
+static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_MODULE_NAME);
+	strcpy(info->version, DRV_MODULE_VERSION);
+	strcpy(info->fw_version, tp->fw_ver);
+	strcpy(info->bus_info, pci_name(tp->pdev));
+}
+
+static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
+		wol->supported = WAKE_MAGIC;
+	else
+		wol->supported = 0;
+	wol->wolopts = 0;
+	if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
+		wol->wolopts = WAKE_MAGIC;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	struct device *dp = &tp->pdev->dev;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+	if ((wol->wolopts & WAKE_MAGIC) &&
+	    !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
+		return -EINVAL;
+
+	device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
+
+	spin_lock_bh(&tp->lock);
+	if (device_may_wakeup(dp))
+		tg3_flag_set(tp, WOL_ENABLE);
+	else
+		tg3_flag_clear(tp, WOL_ENABLE);
+	spin_unlock_bh(&tp->lock);
+
+	return 0;
+}
+
+static u32 tg3_get_msglevel(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	return tp->msg_enable;
+}
+
+static void tg3_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	tp->msg_enable = value;
+}
+
+static int tg3_nway_reset(struct net_device *dev)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int r;
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+		return -EINVAL;
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
+	} else {
+		u32 bmcr;
+
+		spin_lock_bh(&tp->lock);
+		r = -EINVAL;
+		tg3_readphy(tp, MII_BMCR, &bmcr);
+		if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
+		    ((bmcr & BMCR_ANENABLE) ||
+		     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
+			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
+						   BMCR_ANENABLE);
+			r = 0;
+		}
+		spin_unlock_bh(&tp->lock);
+	}
+
+	return r;
+}
+
+static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	ering->rx_max_pending = tp->rx_std_ring_mask;
+	ering->rx_mini_max_pending = 0;
+	if (tg3_flag(tp, JUMBO_RING_ENABLE))
+		ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
+	else
+		ering->rx_jumbo_max_pending = 0;
+
+	ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
+
+	ering->rx_pending = tp->rx_pending;
+	ering->rx_mini_pending = 0;
+	if (tg3_flag(tp, JUMBO_RING_ENABLE))
+		ering->rx_jumbo_pending = tp->rx_jumbo_pending;
+	else
+		ering->rx_jumbo_pending = 0;
+
+	ering->tx_pending = tp->napi[0].tx_pending;
+}
+
+static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int i, irq_sync = 0, err = 0;
+
+	if ((ering->rx_pending > tp->rx_std_ring_mask) ||
+	    (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
+	    (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
+	    (ering->tx_pending <= MAX_SKB_FRAGS) ||
+	    (tg3_flag(tp, TSO_BUG) &&
+	     (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
+		return -EINVAL;
+
+	if (netif_running(dev)) {
+		tg3_phy_stop(tp);
+		tg3_netif_stop(tp);
+		irq_sync = 1;
+	}
+
+	tg3_full_lock(tp, irq_sync);
+
+	tp->rx_pending = ering->rx_pending;
+
+	if (tg3_flag(tp, MAX_RXPEND_64) &&
+	    tp->rx_pending > 63)
+		tp->rx_pending = 63;
+	tp->rx_jumbo_pending = ering->rx_jumbo_pending;
+
+	for (i = 0; i < tp->irq_max; i++)
+		tp->napi[i].tx_pending = ering->tx_pending;
+
+	if (netif_running(dev)) {
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		err = tg3_restart_hw(tp, 1);
+		if (!err)
+			tg3_netif_start(tp);
+	}
+
+	tg3_full_unlock(tp);
+
+	if (irq_sync && !err)
+		tg3_phy_start(tp);
+
+	return err;
+}
+
+static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
+
+	if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+		epause->rx_pause = 1;
+	else
+		epause->rx_pause = 0;
+
+	if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+		epause->tx_pause = 1;
+	else
+		epause->tx_pause = 0;
+}
+
+static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	int err = 0;
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		u32 newadv;
+		struct phy_device *phydev;
+
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+
+		if (!(phydev->supported & SUPPORTED_Pause) ||
+		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+		     (epause->rx_pause != epause->tx_pause)))
+			return -EINVAL;
+
+		tp->link_config.flowctrl = 0;
+		if (epause->rx_pause) {
+			tp->link_config.flowctrl |= FLOW_CTRL_RX;
+
+			if (epause->tx_pause) {
+				tp->link_config.flowctrl |= FLOW_CTRL_TX;
+				newadv = ADVERTISED_Pause;
+			} else
+				newadv = ADVERTISED_Pause |
+					 ADVERTISED_Asym_Pause;
+		} else if (epause->tx_pause) {
+			tp->link_config.flowctrl |= FLOW_CTRL_TX;
+			newadv = ADVERTISED_Asym_Pause;
+		} else
+			newadv = 0;
+
+		if (epause->autoneg)
+			tg3_flag_set(tp, PAUSE_AUTONEG);
+		else
+			tg3_flag_clear(tp, PAUSE_AUTONEG);
+
+		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+			u32 oldadv = phydev->advertising &
+				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+			if (oldadv != newadv) {
+				phydev->advertising &=
+					~(ADVERTISED_Pause |
+					  ADVERTISED_Asym_Pause);
+				phydev->advertising |= newadv;
+				if (phydev->autoneg) {
+					/*
+					 * Always renegotiate the link to
+					 * inform our link partner of our
+					 * flow control settings, even if the
+					 * flow control is forced.  Let
+					 * tg3_adjust_link() do the final
+					 * flow control setup.
+					 */
+					return phy_start_aneg(phydev);
+				}
+			}
+
+			if (!epause->autoneg)
+				tg3_setup_flow_control(tp, 0, 0);
+		} else {
+			tp->link_config.orig_advertising &=
+					~(ADVERTISED_Pause |
+					  ADVERTISED_Asym_Pause);
+			tp->link_config.orig_advertising |= newadv;
+		}
+	} else {
+		int irq_sync = 0;
+
+		if (netif_running(dev)) {
+			tg3_netif_stop(tp);
+			irq_sync = 1;
+		}
+
+		tg3_full_lock(tp, irq_sync);
+
+		if (epause->autoneg)
+			tg3_flag_set(tp, PAUSE_AUTONEG);
+		else
+			tg3_flag_clear(tp, PAUSE_AUTONEG);
+		if (epause->rx_pause)
+			tp->link_config.flowctrl |= FLOW_CTRL_RX;
+		else
+			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
+		if (epause->tx_pause)
+			tp->link_config.flowctrl |= FLOW_CTRL_TX;
+		else
+			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
+
+		if (netif_running(dev)) {
+			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+			err = tg3_restart_hw(tp, 1);
+			if (!err)
+				tg3_netif_start(tp);
+		}
+
+		tg3_full_unlock(tp);
+	}
+
+	return err;
+}
+
+static int tg3_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_TEST:
+		return TG3_NUM_TEST;
+	case ETH_SS_STATS:
+		return TG3_NUM_STATS;
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
+		break;
+	case ETH_SS_TEST:
+		memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
+		break;
+	default:
+		WARN_ON(1);	/* we need a WARN() */
+		break;
+	}
+}
+
+static int tg3_set_phys_id(struct net_device *dev,
+			    enum ethtool_phys_id_state state)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (!netif_running(tp->dev))
+		return -EAGAIN;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		return 1;	/* cycle on/off once per second */
+
+	case ETHTOOL_ID_ON:
+		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+		     LED_CTRL_1000MBPS_ON |
+		     LED_CTRL_100MBPS_ON |
+		     LED_CTRL_10MBPS_ON |
+		     LED_CTRL_TRAFFIC_OVERRIDE |
+		     LED_CTRL_TRAFFIC_BLINK |
+		     LED_CTRL_TRAFFIC_LED);
+		break;
+
+	case ETHTOOL_ID_OFF:
+		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
+		     LED_CTRL_TRAFFIC_OVERRIDE);
+		break;
+
+	case ETHTOOL_ID_INACTIVE:
+		tw32(MAC_LED_CTRL, tp->led_ctrl);
+		break;
+	}
+
+	return 0;
+}
+
+static void tg3_get_ethtool_stats(struct net_device *dev,
+				   struct ethtool_stats *estats, u64 *tmp_stats)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+}
+
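+/* Locate and read the VPD block: an extended-VPD area named by the
+ * NVRAM directory if present, otherwise the default VPD region in
+ * NVRAM, or the PCI VPD capability on devices without a directory-style
+ * NVRAM image.
+ */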
+static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
+{
+	int i;
+	__be32 *buf;
+	u32 offset = 0, len = 0;
+	u32 magic, val;
+
+	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
+		return NULL;
+
+	if (magic == TG3_EEPROM_MAGIC) {
+		for (offset = TG3_NVM_DIR_START;
+		     offset < TG3_NVM_DIR_END;
+		     offset += TG3_NVM_DIRENT_SIZE) {
+			if (tg3_nvram_read(tp, offset, &val))
+				return NULL;
+
+			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
+			    TG3_NVM_DIRTYPE_EXTVPD)
+				break;
+		}
+
+		if (offset != TG3_NVM_DIR_END) {
+			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
+			if (tg3_nvram_read(tp, offset + 4, &offset))
+				return NULL;
+
+			offset = tg3_nvram_logical_addr(tp, offset);
+		}
+	}
+
+	if (!offset || !len) {
+		offset = TG3_NVM_VPD_OFF;
+		len = TG3_NVM_VPD_LEN;
+	}
+
+	buf = kmalloc(len, GFP_KERNEL);
+	if (buf == NULL)
+		return NULL;
+
+	if (magic == TG3_EEPROM_MAGIC) {
+		for (i = 0; i < len; i += 4) {
+			/* The data is in little-endian format in NVRAM.
+			 * Use the big-endian read routines to preserve
+			 * the byte order as it exists in NVRAM.
+			 */
+			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
+				goto error;
+		}
+	} else {
+		u8 *ptr;
+		ssize_t cnt;
+		unsigned int pos = 0;
+
+		ptr = (u8 *)&buf[0];
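+		/* At most three pci_read_vpd() attempts; timeouts and
+		 * signals count as zero-byte reads, not hard errors.
+		 */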
+		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
+			cnt = pci_read_vpd(tp->pdev, pos,
+					   len - pos, ptr);
+			if (cnt == -ETIMEDOUT || cnt == -EINTR)
+				cnt = 0;
+			else if (cnt < 0)
+				goto error;
+		}
+		if (pos != len)
+			goto error;
+	}
+
+	*vpdlen = len;
+
+	return buf;
+
+error:
+	kfree(buf);
+	return NULL;
+}
+
+#define NVRAM_TEST_SIZE 0x100
+#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
+#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
+#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
+#define NVRAM_SELFBOOT_FORMAT1_4_SIZE	0x20
+#define NVRAM_SELFBOOT_FORMAT1_5_SIZE	0x24
+#define NVRAM_SELFBOOT_FORMAT1_6_SIZE	0x50
+#define NVRAM_SELFBOOT_HW_SIZE 0x20
+#define NVRAM_SELFBOOT_DATA_SIZE 0x1c
+
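+/* Verify NVRAM integrity.  Legacy images carry CRC-32 checksums at
+ * offsets 0x10 and 0xfc, selfboot format 1 images use a simple 8-bit
+ * sum, and hardware selfboot images use per-byte parity bits.
+ */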
+static int tg3_test_nvram(struct tg3 *tp)
+{
+	u32 csum, magic, len;
+	__be32 *buf;
+	int i, j, k, err = 0, size;
+
+	if (tg3_flag(tp, NO_NVRAM))
+		return 0;
+
+	if (tg3_nvram_read(tp, 0, &magic) != 0)
+		return -EIO;
+
+	if (magic == TG3_EEPROM_MAGIC)
+		size = NVRAM_TEST_SIZE;
+	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
+		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
+		    TG3_EEPROM_SB_FORMAT_1) {
+			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
+			case TG3_EEPROM_SB_REVISION_0:
+				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_2:
+				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_3:
+				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_4:
+				size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_5:
+				size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
+				break;
+			case TG3_EEPROM_SB_REVISION_6:
+				size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
+				break;
+			default:
+				return -EIO;
+			}
+		} else
+			return 0;
+	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+		size = NVRAM_SELFBOOT_HW_SIZE;
+	else
+		return -EIO;
+
+	buf = kmalloc(size, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	err = -EIO;
+	for (i = 0, j = 0; i < size; i += 4, j++) {
+		err = tg3_nvram_read_be32(tp, i, &buf[j]);
+		if (err)
+			break;
+	}
+	if (i < size)
+		goto out;
+
+	/* Selfboot format */
+	magic = be32_to_cpu(buf[0]);
+	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
+	    TG3_EEPROM_MAGIC_FW) {
+		u8 *buf8 = (u8 *) buf, csum8 = 0;
+
+		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
+		    TG3_EEPROM_SB_REVISION_2) {
+			/* For rev 2, the csum doesn't include the MBA. */
+			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
+				csum8 += buf8[i];
+			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
+				csum8 += buf8[i];
+		} else {
+			for (i = 0; i < size; i++)
+				csum8 += buf8[i];
+		}
+
+		if (csum8 == 0) {
+			err = 0;
+			goto out;
+		}
+
+		err = -EIO;
+		goto out;
+	}
+
+	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
+	    TG3_EEPROM_MAGIC_HW) {
+		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
+		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
+		u8 *buf8 = (u8 *) buf;
+
+		/* Separate the parity bits and the data bytes.  */
+		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
+			if ((i == 0) || (i == 8)) {
+				int l;
+				u8 msk;
+
+				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
+					parity[k++] = buf8[i] & msk;
+				i++;
+			} else if (i == 16) {
+				int l;
+				u8 msk;
+
+				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
+					parity[k++] = buf8[i] & msk;
+				i++;
+
+				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
+					parity[k++] = buf8[i] & msk;
+				i++;
+			}
+			data[j++] = buf8[i];
+		}
+
+		err = -EIO;
+		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
+			u8 hw8 = hweight8(data[i]);
+
+			if ((hw8 & 0x1) && parity[i])
+				goto out;
+			else if (!(hw8 & 0x1) && !parity[i])
+				goto out;
+		}
+		err = 0;
+		goto out;
+	}
+
+	err = -EIO;
+
+	/* Bootstrap checksum at offset 0x10 */
+	csum = calc_crc((unsigned char *) buf, 0x10);
+	if (csum != le32_to_cpu(buf[0x10/4]))
+		goto out;
+
+	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
+	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
+	if (csum != le32_to_cpu(buf[0xfc/4]))
+		goto out;
+
+	kfree(buf);
+
+	buf = tg3_vpd_readblock(tp, &len);
+	if (!buf)
+		return -ENOMEM;
+
+	i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
+	if (i > 0) {
+		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
+		if (j < 0)
+			goto out;
+
+		if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
+			goto out;
+
+		i += PCI_VPD_LRDT_TAG_SIZE;
+		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
+					      PCI_VPD_RO_KEYWORD_CHKSUM);
+		if (j > 0) {
+			u8 csum8 = 0;
+
+			j += PCI_VPD_INFO_FLD_HDR_SIZE;
+
+			for (i = 0; i <= j; i++)
+				csum8 += ((u8 *)buf)[i];
+
+			if (csum8)
+				goto out;
+		}
+	}
+
+	err = 0;
+
+out:
+	kfree(buf);
+	return err;
+}
+
+#define TG3_SERDES_TIMEOUT_SEC	2
+#define TG3_COPPER_TIMEOUT_SEC	6
+
+static int tg3_test_link(struct tg3 *tp)
+{
+	int i, max;
+
+	if (!netif_running(tp->dev))
+		return -ENODEV;
+
+	if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+		max = TG3_SERDES_TIMEOUT_SEC;
+	else
+		max = TG3_COPPER_TIMEOUT_SEC;
+
+	for (i = 0; i < max; i++) {
+		if (netif_carrier_ok(tp->dev))
+			return 0;
+
+		if (msleep_interruptible(1000))
+			break;
+	}
+
+	return -EIO;
+}
+
+/* Only test the commonly used registers */
+static int tg3_test_registers(struct tg3 *tp)
+{
+	int i, is_5705, is_5750;
+	u32 offset, read_mask, write_mask, val, save_val, read_val;
+	static struct {
+		u16 offset;
+		u16 flags;
+#define TG3_FL_5705	0x1
+#define TG3_FL_NOT_5705	0x2
+#define TG3_FL_NOT_5788	0x4
+#define TG3_FL_NOT_5750	0x8
+		u32 read_mask;
+		u32 write_mask;
+	} reg_tbl[] = {
+		/* MAC Control Registers */
+		{ MAC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00ef6f8c },
+		{ MAC_MODE, TG3_FL_5705,
+			0x00000000, 0x01ef6b8c },
+		{ MAC_STATUS, TG3_FL_NOT_5705,
+			0x03800107, 0x00000000 },
+		{ MAC_STATUS, TG3_FL_5705,
+			0x03800100, 0x00000000 },
+		{ MAC_ADDR_0_HIGH, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_ADDR_0_LOW, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_RX_MTU_SIZE, 0x0000,
+			0x00000000, 0x0000ffff },
+		{ MAC_TX_MODE, 0x0000,
+			0x00000000, 0x00000070 },
+		{ MAC_TX_LENGTHS, 0x0000,
+			0x00000000, 0x00003fff },
+		{ MAC_RX_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x000007fc },
+		{ MAC_RX_MODE, TG3_FL_5705,
+			0x00000000, 0x000007dc },
+		{ MAC_HASH_REG_0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_1, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_2, 0x0000,
+			0x00000000, 0xffffffff },
+		{ MAC_HASH_REG_3, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive Data and Receive BD Initiator Control Registers. */
+		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
+			0x00000000, 0x00000003 },
+		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+0, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ RCVDBDI_STD_BD+8, 0x0000,
+			0x00000000, 0xffff0002 },
+		{ RCVDBDI_STD_BD+0xc, 0x0000,
+			0x00000000, 0xffffffff },
+
+		/* Receive BD Initiator Control Registers. */
+		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ RCVBDI_STD_THRESH, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+
+		/* Host Coalescing Control Registers. */
+		{ HOSTCC_MODE, TG3_FL_NOT_5705,
+			0x00000000, 0x00000004 },
+		{ HOSTCC_MODE, TG3_FL_5705,
+			0x00000000, 0x000000f6 },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
+			0x00000000, 0x000003ff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
+			0x00000000, 0x000000ff },
+		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
+			0x00000000, 0xffffffff },
+		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
+			0xffffffff, 0x00000000 },
+
+		/* Buffer Manager Control Registers. */
+		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
+			0x00000000, 0x007fff80 },
+		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
+			0x00000000, 0x007fffff },
+		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
+			0x00000000, 0x0000003f },
+		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_MB_HIGH_WATER, 0x0000,
+			0x00000000, 0x000001ff },
+		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
+			0xffffffff, 0x00000000 },
+
+		/* Mailbox Registers */
+		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
+			0x00000000, 0x000001ff },
+		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
+			0x00000000, 0x000007ff },
+		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
+			0x00000000, 0x000001ff },
+
+		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
+	};
+
+	is_5705 = is_5750 = 0;
+	if (tg3_flag(tp, 5705_PLUS)) {
+		is_5705 = 1;
+		if (tg3_flag(tp, 5750_PLUS))
+			is_5750 = 1;
+	}
+
+	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
+		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
+			continue;
+
+		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
+			continue;
+
+		if (tg3_flag(tp, IS_5788) &&
+		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
+			continue;
+
+		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
+			continue;
+
+		offset = (u32) reg_tbl[i].offset;
+		read_mask = reg_tbl[i].read_mask;
+		write_mask = reg_tbl[i].write_mask;
+
+		/* Save the original register content */
+		save_val = tr32(offset);
+
+		/* Determine the read-only value. */
+		read_val = save_val & read_mask;
+
+		/* Write zero to the register, then make sure the read-only bits
+		 * are not changed and the read/write bits are all zeros.
+		 */
+		tw32(offset, 0);
+
+		val = tr32(offset);
+
+		/* Test the read-only and read/write bits. */
+		if (((val & read_mask) != read_val) || (val & write_mask))
+			goto out;
+
+		/* Write ones to all the bits defined by RdMask and WrMask, then
+		 * make sure the read-only bits are not changed and the
+		 * read/write bits are all ones.
+		 */
+		tw32(offset, read_mask | write_mask);
+
+		val = tr32(offset);
+
+		/* Test the read-only bits. */
+		if ((val & read_mask) != read_val)
+			goto out;
+
+		/* Test the read/write bits. */
+		if ((val & write_mask) != write_mask)
+			goto out;
+
+		tw32(offset, save_val);
+	}
+
+	return 0;
+
+out:
+	if (netif_msg_hw(tp))
+		netdev_err(tp->dev,
+			   "Register test failed at offset %x\n", offset);
+	tw32(offset, save_val);
+	return -EIO;
+}
+
+static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
+{
+	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
+	int i;
+	u32 j;
+
+	for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
+		for (j = 0; j < len; j += 4) {
+			u32 val;
+
+			tg3_write_mem(tp, offset + j, test_pattern[i]);
+			tg3_read_mem(tp, offset + j, &val);
+			if (val != test_pattern[i])
+				return -EIO;
+		}
+	}
+	return 0;
+}
+
+static int tg3_test_memory(struct tg3 *tp)
+{
+	static struct mem_entry {
+		u32 offset;
+		u32 len;
+	} mem_tbl_570x[] = {
+		{ 0x00000000, 0x00b50},
+		{ 0x00002000, 0x1c000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5705[] = {
+		{ 0x00000100, 0x0000c},
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x01000},
+		{ 0x00008000, 0x02000},
+		{ 0x00010000, 0x0e000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5755[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x00800},
+		{ 0x00008000, 0x02000},
+		{ 0x00010000, 0x0c000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5906[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00400},
+		{ 0x00006000, 0x00400},
+		{ 0x00008000, 0x01000},
+		{ 0x00010000, 0x01000},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_5717[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00010000, 0x0a000},
+		{ 0x00020000, 0x13c00},
+		{ 0xffffffff, 0x00000}
+	}, mem_tbl_57765[] = {
+		{ 0x00000200, 0x00008},
+		{ 0x00004000, 0x00800},
+		{ 0x00006000, 0x09800},
+		{ 0x00010000, 0x0a000},
+		{ 0xffffffff, 0x00000}
+	};
+	struct mem_entry *mem_tbl;
+	int err = 0;
+	int i;
+
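+	/* Pick the internal-memory map for this ASIC generation; each
+	 * table lists the testable SRAM regions as (offset, length) pairs
+	 * and is terminated by an offset of 0xffffffff.
+	 */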
+	if (tg3_flag(tp, 5717_PLUS))
+		mem_tbl = mem_tbl_5717;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		mem_tbl = mem_tbl_57765;
+	else if (tg3_flag(tp, 5755_PLUS))
+		mem_tbl = mem_tbl_5755;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		mem_tbl = mem_tbl_5906;
+	else if (tg3_flag(tp, 5705_PLUS))
+		mem_tbl = mem_tbl_5705;
+	else
+		mem_tbl = mem_tbl_570x;
+
+	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
+		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
+		if (err)
+			break;
+	}
+
+	return err;
+}
+
+#define TG3_MAC_LOOPBACK	0
+#define TG3_PHY_LOOPBACK	1
+#define TG3_TSO_LOOPBACK	2
+
+#define TG3_TSO_MSS		500
+
+#define TG3_TSO_IP_HDR_LEN	20
+#define TG3_TSO_TCP_HDR_LEN	20
+#define TG3_TSO_TCP_OPT_LEN	12
+
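+/* Template frame for the TSO loopback test: an Ethertype of 0x0800
+ * (IPv4) followed by a 20-byte IP header and a 32-byte TCP header whose
+ * last 12 bytes are NOP/NOP/timestamp options.  Length and checksum
+ * fields are patched in at run time.
+ */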
+static const u8 tg3_tso_header[] = {
+	0x08, 0x00,
+	0x45, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x40, 0x00,
+	0x40, 0x06, 0x00, 0x00,
+	0x0a, 0x00, 0x00, 0x01,
+	0x0a, 0x00, 0x00, 0x02,
+	0x0d, 0x00, 0xe0, 0x00,
+	0x00, 0x00, 0x01, 0x00,
+	0x00, 0x00, 0x02, 0x00,
+	0x80, 0x10, 0x10, 0x00,
+	0x14, 0x09, 0x00, 0x00,
+	0x01, 0x01, 0x08, 0x0a,
+	0x11, 0x11, 0x11, 0x11,
+	0x11, 0x11, 0x11, 0x11,
+};
+
+static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
+{
+	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
+	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
+	u32 budget;
+	struct sk_buff *skb, *rx_skb;
+	u8 *tx_data;
+	dma_addr_t map;
+	int num_pkts, tx_len, rx_len, i, err;
+	struct tg3_rx_buffer_desc *desc;
+	struct tg3_napi *tnapi, *rnapi;
+	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
+
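+	/* Default both directions to ring 0.  With RSS or TSS enabled the
+	 * first data ring is serviced by vector 1, so steer the rx or tx
+	 * side of the test there instead.
+	 */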
+	tnapi = &tp->napi[0];
+	rnapi = &tp->napi[0];
+	if (tp->irq_cnt > 1) {
+		if (tg3_flag(tp, ENABLE_RSS))
+			rnapi = &tp->napi[1];
+		if (tg3_flag(tp, ENABLE_TSS))
+			tnapi = &tp->napi[1];
+	}
+	coal_now = tnapi->coal_now | rnapi->coal_now;
+
+	if (loopback_mode == TG3_MAC_LOOPBACK) {
+		/* HW erratum - MAC loopback fails in some cases on 5780.
+		 * Normal traffic and PHY loopback are not affected by the
+		 * erratum.  Also, the MAC loopback test is deprecated for
+		 * all newer ASIC revisions.
+		 */
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+		    tg3_flag(tp, CPMU_PRESENT))
+			return 0;
+
+		mac_mode = tp->mac_mode &
+			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+		if (!tg3_flag(tp, 5705_PLUS))
+			mac_mode |= MAC_MODE_LINK_POLARITY;
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+		else
+			mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		tw32(MAC_MODE, mac_mode);
+	} else {
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+			tg3_phy_fet_toggle_apd(tp, false);
+			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
+		} else
+			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+
+		tg3_phy_toggle_automdix(tp, 0);
+
+		tg3_writephy(tp, MII_BMCR, val);
+		udelay(40);
+
+		mac_mode = tp->mac_mode &
+			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
+		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+			tg3_writephy(tp, MII_TG3_FET_PTEST,
+				     MII_TG3_FET_PTEST_FRC_TX_LINK |
+				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
+			/* The write needs to be flushed for the AC131 */
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
+			mac_mode |= MAC_MODE_PORT_MODE_MII;
+		} else
+			mac_mode |= MAC_MODE_PORT_MODE_GMII;
+
+		/* reset to prevent losing 1st rx packet intermittently */
+		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
+			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
+			udelay(10);
+			tw32_f(MAC_RX_MODE, tp->rx_mode);
+		}
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
+			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
+			if (masked_phy_id == TG3_PHY_ID_BCM5401)
+				mac_mode &= ~MAC_MODE_LINK_POLARITY;
+			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
+				mac_mode |= MAC_MODE_LINK_POLARITY;
+			tg3_writephy(tp, MII_TG3_EXT_CTRL,
+				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
+		}
+		tw32(MAC_MODE, mac_mode);
+
+		/* Wait for link */
+		for (i = 0; i < 100; i++) {
+			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
+				break;
+			mdelay(1);
+		}
+	}
+
+	err = -EIO;
+
+	tx_len = pktsz;
+	skb = netdev_alloc_skb(tp->dev, tx_len);
+	if (!skb)
+		return -ENOMEM;
+
+	tx_data = skb_put(skb, tx_len);
+	memcpy(tx_data, tp->dev->dev_addr, 6);
+	memset(tx_data + 6, 0x0, 8);
+
+	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
+
+	if (loopback_mode == TG3_TSO_LOOPBACK) {
+		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
+		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
+			      TG3_TSO_TCP_OPT_LEN;
+
+		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
+		       sizeof(tg3_tso_header));
+		mss = TG3_TSO_MSS;
+
+		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
+		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
+
+		/* Set the total length field in the IP header */
+		iph->tot_len = htons((u16)(mss + hdr_len));
+
+		base_flags = (TXD_FLAG_CPU_PRE_DMA |
+			      TXD_FLAG_CPU_POST_DMA);
+
+		if (tg3_flag(tp, HW_TSO_1) ||
+		    tg3_flag(tp, HW_TSO_2) ||
+		    tg3_flag(tp, HW_TSO_3)) {
+			struct tcphdr *th;
+			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
+			th = (struct tcphdr *)&tx_data[val];
+			th->check = 0;
+		} else
+			base_flags |= TXD_FLAG_TCPUDP_CSUM;
+
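+		/* Each hardware TSO generation encodes the header length
+		 * differently: HW_TSO_3 splits it between the mss field
+		 * and base_flags, HW_TSO_2 packs it into the upper mss
+		 * bits, and older parts encode only the TCP option length.
+		 */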
+		if (tg3_flag(tp, HW_TSO_3)) {
+			mss |= (hdr_len & 0xc) << 12;
+			if (hdr_len & 0x10)
+				base_flags |= 0x00000010;
+			base_flags |= (hdr_len & 0x3e0) << 5;
+		} else if (tg3_flag(tp, HW_TSO_2))
+			mss |= hdr_len << 9;
+		else if (tg3_flag(tp, HW_TSO_1) ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
+			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
+		} else {
+			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
+		}
+
+		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
+	} else {
+		num_pkts = 1;
+		data_off = ETH_HLEN;
+	}
+
+	for (i = data_off; i < tx_len; i++)
+		tx_data[i] = (u8) (i & 0xff);
+
+	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
+	if (pci_dma_mapping_error(tp->pdev, map)) {
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	val = tnapi->tx_prod;
+	tnapi->tx_buffers[val].skb = skb;
+	dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
+
+	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+	       rnapi->coal_now);
+
+	udelay(10);
+
+	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
+
+	budget = tg3_tx_avail(tnapi);
+	if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
+			    base_flags | TXD_FLAG_END, mss, 0)) {
+		tnapi->tx_buffers[val].skb = NULL;
+		dev_kfree_skb(skb);
+		return -EIO;
+	}
+
+	tnapi->tx_prod++;
+
+	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
+	tr32_mailbox(tnapi->prodmbox);
+
+	udelay(10);
+
+	/* Poll for up to 350 usec (35 iterations x 10 usec) to allow
+	 * enough time on some 10/100 Mbps devices.
+	 */
+	for (i = 0; i < 35; i++) {
+		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
+		       coal_now);
+
+		udelay(10);
+
+		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
+		rx_idx = rnapi->hw_status->idx[0].rx_producer;
+		if ((tx_idx == tnapi->tx_prod) &&
+		    (rx_idx == (rx_start_idx + num_pkts)))
+			break;
+	}
+
+	tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, 0);
+	dev_kfree_skb(skb);
+
+	if (tx_idx != tnapi->tx_prod)
+		goto out;
+
+	if (rx_idx != rx_start_idx + num_pkts)
+		goto out;
+
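+	/* Walk every completion the test generated and check ring type,
+	 * error bits, length, checksum status and the payload pattern.
+	 */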
+	val = data_off;
+	while (rx_idx != rx_start_idx) {
+		desc = &rnapi->rx_rcb[rx_start_idx++];
+		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
+		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
+
+		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
+		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
+			goto out;
+
+		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
+			 - ETH_FCS_LEN;
+
+		if (loopback_mode != TG3_TSO_LOOPBACK) {
+			if (rx_len != tx_len)
+				goto out;
+
+			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
+				if (opaque_key != RXD_OPAQUE_RING_STD)
+					goto out;
+			} else {
+				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
+					goto out;
+			}
+		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
+			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
+			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
+			goto out;
+		}
+
+		if (opaque_key == RXD_OPAQUE_RING_STD) {
+			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
+					     mapping);
+		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
+			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
+					     mapping);
+		} else
+			goto out;
+
+		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
+					    PCI_DMA_FROMDEVICE);
+
+		for (i = data_off; i < rx_len; i++, val++) {
+			if (*(rx_skb->data + i) != (u8) (val & 0xff))
+				goto out;
+		}
+	}
+
+	err = 0;
+
+	/* tg3_free_rings will unmap and free the rx_skb */
+out:
+	return err;
+}
+
+#define TG3_STD_LOOPBACK_FAILED		1
+#define TG3_JMB_LOOPBACK_FAILED		2
+#define TG3_TSO_LOOPBACK_FAILED		4
+
+#define TG3_MAC_LOOPBACK_SHIFT		0
+#define TG3_PHY_LOOPBACK_SHIFT		4
+#define TG3_LOOPBACK_FAILED		0x00000077
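+/* Loopback results are reported as a bitmask: the STD/JMB/TSO failure
+ * bits above are shifted by the MAC (0) or PHY (4) loopback offset, so
+ * 0x77 marks every sub-test as failed.
+ */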
+
+static int tg3_test_loopback(struct tg3 *tp)
+{
+	int err = 0;
+	u32 eee_cap, cpmuctrl = 0;
+
+	if (!netif_running(tp->dev))
+		return TG3_LOOPBACK_FAILED;
+
+	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
+	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
+
+	err = tg3_reset_hw(tp, 1);
+	if (err) {
+		err = TG3_LOOPBACK_FAILED;
+		goto done;
+	}
+
+	if (tg3_flag(tp, ENABLE_RSS)) {
+		int i;
+
+		/* Reroute all rx packets to the 1st queue */
+		for (i = MAC_RSS_INDIR_TBL_0;
+		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
+			tw32(i, 0x0);
+	}
+
+	/* Turn off gphy autopowerdown. */
+	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+		tg3_phy_toggle_apd(tp, false);
+
+	if (tg3_flag(tp, CPMU_PRESENT)) {
+		int i;
+		u32 status;
+
+		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
+
+		/* Wait for up to 40 microseconds to acquire lock. */
+		for (i = 0; i < 4; i++) {
+			status = tr32(TG3_CPMU_MUTEX_GNT);
+			if (status == CPMU_MUTEX_GNT_DRIVER)
+				break;
+			udelay(10);
+		}
+
+		if (status != CPMU_MUTEX_GNT_DRIVER) {
+			err = TG3_LOOPBACK_FAILED;
+			goto done;
+		}
+
+		/* Turn off link-based power management. */
+		cpmuctrl = tr32(TG3_CPMU_CTRL);
+		tw32(TG3_CPMU_CTRL,
+		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
+				  CPMU_CTRL_LINK_AWARE_MODE));
+	}
+
+	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
+		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
+
+	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
+		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;
+
+	if (tg3_flag(tp, CPMU_PRESENT)) {
+		tw32(TG3_CPMU_CTRL, cpmuctrl);
+
+		/* Release the mutex */
+		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
+	}
+
+	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
+	    !tg3_flag(tp, USE_PHYLIB)) {
+		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
+			err |= TG3_STD_LOOPBACK_FAILED <<
+			       TG3_PHY_LOOPBACK_SHIFT;
+		if (tg3_flag(tp, TSO_CAPABLE) &&
+		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
+			err |= TG3_TSO_LOOPBACK_FAILED <<
+			       TG3_PHY_LOOPBACK_SHIFT;
+		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
+		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
+			err |= TG3_JMB_LOOPBACK_FAILED <<
+			       TG3_PHY_LOOPBACK_SHIFT;
+	}
+
+	/* Re-enable gphy autopowerdown. */
+	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
+		tg3_phy_toggle_apd(tp, true);
+
+done:
+	tp->phy_flags |= eee_cap;
+
+	return err;
+}
+
+static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
+			  u64 *data)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
+	    tg3_power_up(tp)) {
+		etest->flags |= ETH_TEST_FL_FAILED;
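+		/* Fill every byte so each u64 result reads nonzero,
+		 * which ethtool reports as a failure.
+		 */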
+		memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
+		return;
+	}
+
+	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
+
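+	/* Each data[] slot reports one sub-test: 0 nvram, 1 link,
+	 * 2 registers, 3 memory, 4 loopback, 5 interrupt; a nonzero
+	 * value means that sub-test failed.
+	 */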
+	if (tg3_test_nvram(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[0] = 1;
+	}
+	if (tg3_test_link(tp) != 0) {
+		etest->flags |= ETH_TEST_FL_FAILED;
+		data[1] = 1;
+	}
+	if (etest->flags & ETH_TEST_FL_OFFLINE) {
+		int err, err2 = 0, irq_sync = 0;
+
+		if (netif_running(dev)) {
+			tg3_phy_stop(tp);
+			tg3_netif_stop(tp);
+			irq_sync = 1;
+		}
+
+		tg3_full_lock(tp, irq_sync);
+
+		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
+		err = tg3_nvram_lock(tp);
+		tg3_halt_cpu(tp, RX_CPU_BASE);
+		if (!tg3_flag(tp, 5705_PLUS))
+			tg3_halt_cpu(tp, TX_CPU_BASE);
+		if (!err)
+			tg3_nvram_unlock(tp);
+
+		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
+			tg3_phy_reset(tp);
+
+		if (tg3_test_registers(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[2] = 1;
+		}
+		if (tg3_test_memory(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[3] = 1;
+		}
+		if ((data[4] = tg3_test_loopback(tp)) != 0)
+			etest->flags |= ETH_TEST_FL_FAILED;
+
+		tg3_full_unlock(tp);
+
+		if (tg3_test_interrupt(tp) != 0) {
+			etest->flags |= ETH_TEST_FL_FAILED;
+			data[5] = 1;
+		}
+
+		tg3_full_lock(tp, 0);
+
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+		if (netif_running(dev)) {
+			tg3_flag_set(tp, INIT_COMPLETE);
+			err2 = tg3_restart_hw(tp, 1);
+			if (!err2)
+				tg3_netif_start(tp);
+		}
+
+		tg3_full_unlock(tp);
+
+		if (irq_sync && !err2)
+			tg3_phy_start(tp);
+	}
+	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
+		tg3_power_down(tp);
+}
+
+static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct mii_ioctl_data *data = if_mii(ifr);
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+
+	if (tg3_flag(tp, USE_PHYLIB)) {
+		struct phy_device *phydev;
+		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
+			return -EAGAIN;
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		return phy_mii_ioctl(phydev, ifr, cmd);
+	}
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = tp->phy_addr;
+
+		/* fallthru */
+	case SIOCGMIIREG: {
+		u32 mii_regval;
+
+		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+			break;			/* We have no PHY */
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&tp->lock);
+		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
+		spin_unlock_bh(&tp->lock);
+
+		data->val_out = mii_regval;
+
+		return err;
+	}
+
+	case SIOCSMIIREG:
+		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+			break;			/* We have no PHY */
+
+		if (!netif_running(dev))
+			return -EAGAIN;
+
+		spin_lock_bh(&tp->lock);
+		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
+		spin_unlock_bh(&tp->lock);
+
+		return err;
+
+	default:
+		/* do nothing */
+		break;
+	}
+	return -EOPNOTSUPP;
+}
+
+static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	memcpy(ec, &tp->coal, sizeof(*ec));
+	return 0;
+}
+
+static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
+{
+	struct tg3 *tp = netdev_priv(dev);
+	u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
+	u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
+
+	if (!tg3_flag(tp, 5705_PLUS)) {
+		max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
+		max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
+		max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
+		min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
+	}
+
+	if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+	    (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+	    (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
+	    (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
+	    (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
+	    (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
+	    (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
+	    (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
+	    (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
+	    (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
+		return -EINVAL;
+
+	/* No rx interrupts will be generated if both are zero */
+	if ((ec->rx_coalesce_usecs == 0) &&
+	    (ec->rx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	/* No tx interrupts will be generated if both are zero */
+	if ((ec->tx_coalesce_usecs == 0) &&
+	    (ec->tx_max_coalesced_frames == 0))
+		return -EINVAL;
+
+	/* Only copy relevant parameters, ignore all others. */
+	tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
+	tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
+	tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
+	tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
+	tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
+	tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
+	tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
+	tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
+	tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
+
+	if (netif_running(dev)) {
+		tg3_full_lock(tp, 0);
+		__tg3_set_coalesce(tp, &tp->coal);
+		tg3_full_unlock(tp);
+	}
+	return 0;
+}
+
+static const struct ethtool_ops tg3_ethtool_ops = {
+	.get_settings		= tg3_get_settings,
+	.set_settings		= tg3_set_settings,
+	.get_drvinfo		= tg3_get_drvinfo,
+	.get_regs_len		= tg3_get_regs_len,
+	.get_regs		= tg3_get_regs,
+	.get_wol		= tg3_get_wol,
+	.set_wol		= tg3_set_wol,
+	.get_msglevel		= tg3_get_msglevel,
+	.set_msglevel		= tg3_set_msglevel,
+	.nway_reset		= tg3_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_eeprom_len		= tg3_get_eeprom_len,
+	.get_eeprom		= tg3_get_eeprom,
+	.set_eeprom		= tg3_set_eeprom,
+	.get_ringparam		= tg3_get_ringparam,
+	.set_ringparam		= tg3_set_ringparam,
+	.get_pauseparam		= tg3_get_pauseparam,
+	.set_pauseparam		= tg3_set_pauseparam,
+	.self_test		= tg3_self_test,
+	.get_strings		= tg3_get_strings,
+	.set_phys_id		= tg3_set_phys_id,
+	.get_ethtool_stats	= tg3_get_ethtool_stats,
+	.get_coalesce		= tg3_get_coalesce,
+	.set_coalesce		= tg3_set_coalesce,
+	.get_sset_count		= tg3_get_sset_count,
+};
+
+static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
+{
+	u32 cursize, val, magic;
+
+	tp->nvram_size = EEPROM_CHIP_SIZE;
+
+	if (tg3_nvram_read(tp, 0, &magic) != 0)
+		return;
+
+	if ((magic != TG3_EEPROM_MAGIC) &&
+	    ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
+	    ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
+		return;
+
+	/*
+	 * Size the chip by reading offsets at increasing powers of two.
+	 * When we encounter our validation signature, we know the addressing
+	 * has wrapped around, and thus have our chip size.
+	 */
+	cursize = 0x10;
+
+	while (cursize < tp->nvram_size) {
+		if (tg3_nvram_read(tp, cursize, &val) != 0)
+			return;
+
+		if (val == magic)
+			break;
+
+		cursize <<= 1;
+	}
+
+	tp->nvram_size = cursize;
+}
+
+static void __devinit tg3_get_nvram_size(struct tg3 *tp)
+{
+	u32 val;
+
+	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
+		return;
+
+	/* Selfboot format */
+	if (val != TG3_EEPROM_MAGIC) {
+		tg3_get_eeprom_size(tp);
+		return;
+	}
+
+	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
+		if (val != 0) {
+			/* This is confusing.  We want to operate on the
+			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
+			 * call will read from NVRAM and byteswap the data
+			 * according to the byteswapping settings for all
+			 * other register accesses.  This ensures the data we
+			 * want will always reside in the lower 16-bits.
+			 * However, the data in NVRAM is in LE format, which
+			 * means the data from the NVRAM read will always be
+			 * opposite the endianness of the CPU.  The 16-bit
+			 * byteswap then brings the data to CPU endianness.
+			 */
+			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
+			return;
+		}
+	}
+	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+}
+
+static void __devinit tg3_get_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
+		tg3_flag_set(tp, FLASH);
+	} else {
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+	    tg3_flag(tp, 5780_CLASS)) {
+		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
+		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
+			tp->nvram_jedecnum = JEDEC_ATMEL;
+			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			break;
+		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
+			tp->nvram_jedecnum = JEDEC_ATMEL;
+			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
+			break;
+		case FLASH_VENDOR_ATMEL_EEPROM:
+			tp->nvram_jedecnum = JEDEC_ATMEL;
+			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			break;
+		case FLASH_VENDOR_ST:
+			tp->nvram_jedecnum = JEDEC_ST;
+			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
+			tg3_flag_set(tp, NVRAM_BUFFERED);
+			break;
+		case FLASH_VENDOR_SAIFUN:
+			tp->nvram_jedecnum = JEDEC_SAIFUN;
+			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
+			break;
+		case FLASH_VENDOR_SST_SMALL:
+		case FLASH_VENDOR_SST_LARGE:
+			tp->nvram_jedecnum = JEDEC_SST;
+			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
+			break;
+		}
+	} else {
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+	}
+}
+
+static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
+{
+	switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
+	case FLASH_5752PAGE_SIZE_256:
+		tp->nvram_pagesize = 256;
+		break;
+	case FLASH_5752PAGE_SIZE_512:
+		tp->nvram_pagesize = 512;
+		break;
+	case FLASH_5752PAGE_SIZE_1K:
+		tp->nvram_pagesize = 1024;
+		break;
+	case FLASH_5752PAGE_SIZE_2K:
+		tp->nvram_pagesize = 2048;
+		break;
+	case FLASH_5752PAGE_SIZE_4K:
+		tp->nvram_pagesize = 4096;
+		break;
+	case FLASH_5752PAGE_SIZE_264:
+		tp->nvram_pagesize = 264;
+		break;
+	case FLASH_5752PAGE_SIZE_528:
+		tp->nvram_pagesize = 528;
+		break;
+	}
+}
+
+static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	/* NVRAM protection for TPM */
+	if (nvcfg1 & (1 << 27))
+		tg3_flag_set(tp, PROTECTED_NVRAM);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
+	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		break;
+	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		break;
+	}
+
+	if (tg3_flag(tp, FLASH)) {
+		tg3_nvram_get_pagesize(tp, nvcfg1);
+	} else {
+		/* For eeprom, set pagesize to maximum eeprom size */
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+	}
+}
+
+static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1, protect = 0;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	/* NVRAM protection for TPM */
+	if (nvcfg1 & (1 << 27)) {
+		tg3_flag_set(tp, PROTECTED_NVRAM);
+		protect = 1;
+	}
+
+	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+	switch (nvcfg1) {
+	case FLASH_5755VENDOR_ATMEL_FLASH_1:
+	case FLASH_5755VENDOR_ATMEL_FLASH_2:
+	case FLASH_5755VENDOR_ATMEL_FLASH_3:
+	case FLASH_5755VENDOR_ATMEL_FLASH_5:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 264;
+		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
+		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
+			tp->nvram_size = (protect ? 0x3e200 :
+					  TG3_NVRAM_SIZE_512KB);
+		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
+			tp->nvram_size = (protect ? 0x1f200 :
+					  TG3_NVRAM_SIZE_256KB);
+		else
+			tp->nvram_size = (protect ? 0x1f200 :
+					  TG3_NVRAM_SIZE_128KB);
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 256;
+		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
+			tp->nvram_size = (protect ?
+					  TG3_NVRAM_SIZE_64KB :
+					  TG3_NVRAM_SIZE_128KB);
+		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
+			tp->nvram_size = (protect ?
+					  TG3_NVRAM_SIZE_64KB :
+					  TG3_NVRAM_SIZE_256KB);
+		else
+			tp->nvram_size = (protect ?
+					  TG3_NVRAM_SIZE_128KB :
+					  TG3_NVRAM_SIZE_512KB);
+		break;
+	}
+}
+
+static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
+	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
+	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		break;
+	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+	case FLASH_5755VENDOR_ATMEL_FLASH_1:
+	case FLASH_5755VENDOR_ATMEL_FLASH_2:
+	case FLASH_5755VENDOR_ATMEL_FLASH_3:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 264;
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 256;
+		break;
+	}
+}
+
+static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1, protect = 0;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	/* NVRAM protection for TPM */
+	if (nvcfg1 & (1 << 27)) {
+		tg3_flag_set(tp, PROTECTED_NVRAM);
+		protect = 1;
+	}
+
+	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
+	switch (nvcfg1) {
+	case FLASH_5761VENDOR_ATMEL_ADB021D:
+	case FLASH_5761VENDOR_ATMEL_ADB041D:
+	case FLASH_5761VENDOR_ATMEL_ADB081D:
+	case FLASH_5761VENDOR_ATMEL_ADB161D:
+	case FLASH_5761VENDOR_ATMEL_MDB021D:
+	case FLASH_5761VENDOR_ATMEL_MDB041D:
+	case FLASH_5761VENDOR_ATMEL_MDB081D:
+	case FLASH_5761VENDOR_ATMEL_MDB161D:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+		tp->nvram_pagesize = 256;
+		break;
+	case FLASH_5761VENDOR_ST_A_M45PE20:
+	case FLASH_5761VENDOR_ST_A_M45PE40:
+	case FLASH_5761VENDOR_ST_A_M45PE80:
+	case FLASH_5761VENDOR_ST_A_M45PE16:
+	case FLASH_5761VENDOR_ST_M_M45PE20:
+	case FLASH_5761VENDOR_ST_M_M45PE40:
+	case FLASH_5761VENDOR_ST_M_M45PE80:
+	case FLASH_5761VENDOR_ST_M_M45PE16:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+		tp->nvram_pagesize = 256;
+		break;
+	}
+
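+	/* With TPM protection active the usable size is bounded by the
+	 * lockout address rather than by the part's full capacity.
+	 */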
+	if (protect) {
+		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
+	} else {
+		switch (nvcfg1) {
+		case FLASH_5761VENDOR_ATMEL_ADB161D:
+		case FLASH_5761VENDOR_ATMEL_MDB161D:
+		case FLASH_5761VENDOR_ST_A_M45PE16:
+		case FLASH_5761VENDOR_ST_M_M45PE16:
+			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
+			break;
+		case FLASH_5761VENDOR_ATMEL_ADB081D:
+		case FLASH_5761VENDOR_ATMEL_MDB081D:
+		case FLASH_5761VENDOR_ST_A_M45PE80:
+		case FLASH_5761VENDOR_ST_M_M45PE80:
+			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+			break;
+		case FLASH_5761VENDOR_ATMEL_ADB041D:
+		case FLASH_5761VENDOR_ATMEL_MDB041D:
+		case FLASH_5761VENDOR_ST_A_M45PE40:
+		case FLASH_5761VENDOR_ST_M_M45PE40:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		case FLASH_5761VENDOR_ATMEL_ADB021D:
+		case FLASH_5761VENDOR_ATMEL_MDB021D:
+		case FLASH_5761VENDOR_ST_A_M45PE20:
+		case FLASH_5761VENDOR_ST_M_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		}
+	}
+}
+
+static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
+{
+	tp->nvram_jedecnum = JEDEC_ATMEL;
+	tg3_flag_set(tp, NVRAM_BUFFERED);
+	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+}
+
+static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
+	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		return;
+	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
+		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
+		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
+		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
+		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		}
+		break;
+	case FLASH_5752VENDOR_ST_M45PE10:
+	case FLASH_5752VENDOR_ST_M45PE20:
+	case FLASH_5752VENDOR_ST_M45PE40:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5752VENDOR_ST_M45PE10:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		case FLASH_5752VENDOR_ST_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_5752VENDOR_ST_M45PE40:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		}
+		break;
+	default:
+		tg3_flag_set(tp, NO_NVRAM);
+		return;
+	}
+
+	tg3_nvram_get_pagesize(tp, nvcfg1);
+	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+
+	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+	case FLASH_5717VENDOR_ATMEL_EEPROM:
+	case FLASH_5717VENDOR_MICRO_EEPROM:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		return;
+	case FLASH_5717VENDOR_ATMEL_MDB011D:
+	case FLASH_5717VENDOR_ATMEL_ADB011B:
+	case FLASH_5717VENDOR_ATMEL_ADB011D:
+	case FLASH_5717VENDOR_ATMEL_MDB021D:
+	case FLASH_5717VENDOR_ATMEL_ADB021B:
+	case FLASH_5717VENDOR_ATMEL_ADB021D:
+	case FLASH_5717VENDOR_ATMEL_45USPT:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5717VENDOR_ATMEL_MDB021D:
+			/* Detect size with tg3_nvram_get_size() */
+			break;
+		case FLASH_5717VENDOR_ATMEL_ADB021B:
+		case FLASH_5717VENDOR_ATMEL_ADB021D:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		default:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	case FLASH_5717VENDOR_ST_M_M25PE10:
+	case FLASH_5717VENDOR_ST_A_M25PE10:
+	case FLASH_5717VENDOR_ST_M_M45PE10:
+	case FLASH_5717VENDOR_ST_A_M45PE10:
+	case FLASH_5717VENDOR_ST_M_M25PE20:
+	case FLASH_5717VENDOR_ST_A_M25PE20:
+	case FLASH_5717VENDOR_ST_M_M45PE20:
+	case FLASH_5717VENDOR_ST_A_M45PE20:
+	case FLASH_5717VENDOR_ST_25USPT:
+	case FLASH_5717VENDOR_ST_45USPT:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
+		case FLASH_5717VENDOR_ST_M_M25PE20:
+		case FLASH_5717VENDOR_ST_M_M45PE20:
+			/* Detect size with tg3_nvram_get_size() */
+			break;
+		case FLASH_5717VENDOR_ST_A_M25PE20:
+		case FLASH_5717VENDOR_ST_A_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		default:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	default:
+		tg3_flag_set(tp, NO_NVRAM);
+		return;
+	}
+
+	tg3_nvram_get_pagesize(tp, nvcfg1);
+	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
+{
+	u32 nvcfg1, nvmpinstrp;
+
+	nvcfg1 = tr32(NVRAM_CFG1);
+	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
+
+	switch (nvmpinstrp) {
+	case FLASH_5720_EEPROM_HD:
+	case FLASH_5720_EEPROM_LD:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+
+		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
+		tw32(NVRAM_CFG1, nvcfg1);
+		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
+			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
+		else
+			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
+		return;
+	case FLASH_5720VENDOR_M_ATMEL_DB011D:
+	case FLASH_5720VENDOR_A_ATMEL_DB011B:
+	case FLASH_5720VENDOR_A_ATMEL_DB011D:
+	case FLASH_5720VENDOR_M_ATMEL_DB021D:
+	case FLASH_5720VENDOR_A_ATMEL_DB021B:
+	case FLASH_5720VENDOR_A_ATMEL_DB021D:
+	case FLASH_5720VENDOR_M_ATMEL_DB041D:
+	case FLASH_5720VENDOR_A_ATMEL_DB041B:
+	case FLASH_5720VENDOR_A_ATMEL_DB041D:
+	case FLASH_5720VENDOR_M_ATMEL_DB081D:
+	case FLASH_5720VENDOR_A_ATMEL_DB081D:
+	case FLASH_5720VENDOR_ATMEL_45USPT:
+		tp->nvram_jedecnum = JEDEC_ATMEL;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvmpinstrp) {
+		case FLASH_5720VENDOR_M_ATMEL_DB021D:
+		case FLASH_5720VENDOR_A_ATMEL_DB021B:
+		case FLASH_5720VENDOR_A_ATMEL_DB021D:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_5720VENDOR_M_ATMEL_DB041D:
+		case FLASH_5720VENDOR_A_ATMEL_DB041B:
+		case FLASH_5720VENDOR_A_ATMEL_DB041D:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		case FLASH_5720VENDOR_M_ATMEL_DB081D:
+		case FLASH_5720VENDOR_A_ATMEL_DB081D:
+			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+			break;
+		default:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	case FLASH_5720VENDOR_M_ST_M25PE10:
+	case FLASH_5720VENDOR_M_ST_M45PE10:
+	case FLASH_5720VENDOR_A_ST_M25PE10:
+	case FLASH_5720VENDOR_A_ST_M45PE10:
+	case FLASH_5720VENDOR_M_ST_M25PE20:
+	case FLASH_5720VENDOR_M_ST_M45PE20:
+	case FLASH_5720VENDOR_A_ST_M25PE20:
+	case FLASH_5720VENDOR_A_ST_M45PE20:
+	case FLASH_5720VENDOR_M_ST_M25PE40:
+	case FLASH_5720VENDOR_M_ST_M45PE40:
+	case FLASH_5720VENDOR_A_ST_M25PE40:
+	case FLASH_5720VENDOR_A_ST_M45PE40:
+	case FLASH_5720VENDOR_M_ST_M25PE80:
+	case FLASH_5720VENDOR_M_ST_M45PE80:
+	case FLASH_5720VENDOR_A_ST_M25PE80:
+	case FLASH_5720VENDOR_A_ST_M45PE80:
+	case FLASH_5720VENDOR_ST_25USPT:
+	case FLASH_5720VENDOR_ST_45USPT:
+		tp->nvram_jedecnum = JEDEC_ST;
+		tg3_flag_set(tp, NVRAM_BUFFERED);
+		tg3_flag_set(tp, FLASH);
+
+		switch (nvmpinstrp) {
+		case FLASH_5720VENDOR_M_ST_M25PE20:
+		case FLASH_5720VENDOR_M_ST_M45PE20:
+		case FLASH_5720VENDOR_A_ST_M25PE20:
+		case FLASH_5720VENDOR_A_ST_M45PE20:
+			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
+			break;
+		case FLASH_5720VENDOR_M_ST_M25PE40:
+		case FLASH_5720VENDOR_M_ST_M45PE40:
+		case FLASH_5720VENDOR_A_ST_M25PE40:
+		case FLASH_5720VENDOR_A_ST_M45PE40:
+			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
+			break;
+		case FLASH_5720VENDOR_M_ST_M25PE80:
+		case FLASH_5720VENDOR_M_ST_M45PE80:
+		case FLASH_5720VENDOR_A_ST_M25PE80:
+		case FLASH_5720VENDOR_A_ST_M45PE80:
+			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
+			break;
+		default:
+			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
+			break;
+		}
+		break;
+	default:
+		tg3_flag_set(tp, NO_NVRAM);
+		return;
+	}
+
+	tg3_nvram_get_pagesize(tp, nvcfg1);
+	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
+		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
+}
+
+/* Chips other than 5700/5701 use the NVRAM for fetching info. */
+static void __devinit tg3_nvram_init(struct tg3 *tp)
+{
+	tw32_f(GRC_EEPROM_ADDR,
+	     (EEPROM_ADDR_FSM_RESET |
+	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
+	       EEPROM_ADDR_CLKPERD_SHIFT)));
+
+	msleep(1);
+
+	/* Enable seeprom accesses. */
+	tw32_f(GRC_LOCAL_CTRL,
+	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
+	udelay(100);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
+		tg3_flag_set(tp, NVRAM);
+
+		if (tg3_nvram_lock(tp)) {
+			netdev_warn(tp->dev,
+				    "Cannot get nvram lock, %s failed\n",
+				    __func__);
+			return;
+		}
+		tg3_enable_nvram_access(tp);
+
+		tp->nvram_size = 0;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+			tg3_get_5752_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+			tg3_get_5755_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+			tg3_get_5787_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
+			tg3_get_5761_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+			tg3_get_5906_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+			tg3_get_57780_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+			tg3_get_5717_nvram_info(tp);
+		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+			tg3_get_5720_nvram_info(tp);
+		else
+			tg3_get_nvram_info(tp);
+
+		if (tp->nvram_size == 0)
+			tg3_get_nvram_size(tp);
+
+		tg3_disable_nvram_access(tp);
+		tg3_nvram_unlock(tp);
+
+	} else {
+		tg3_flag_clear(tp, NVRAM);
+		tg3_flag_clear(tp, NVRAM_BUFFERED);
+
+		tg3_get_eeprom_size(tp);
+	}
+}
+
+static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
+				    u32 offset, u32 len, u8 *buf)
+{
+	int i, j, rc = 0;
+	u32 val;
+
+	for (i = 0; i < len; i += 4) {
+		u32 addr;
+		__be32 data;
+
+		addr = offset + i;
+
+		memcpy(&data, buf + i, 4);
+
+		/*
+		 * The SEEPROM interface expects the data to always be opposite
+		 * the native endian format.  We accomplish this by reversing
+		 * all the operations that would have been performed on the
+		 * data from a call to tg3_nvram_read_be32().
+		 */
+		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
+
+		val = tr32(GRC_EEPROM_ADDR);
+		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
+
+		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
+			EEPROM_ADDR_READ);
+		tw32(GRC_EEPROM_ADDR, val |
+			(0 << EEPROM_ADDR_DEVID_SHIFT) |
+			(addr & EEPROM_ADDR_ADDR_MASK) |
+			EEPROM_ADDR_START |
+			EEPROM_ADDR_WRITE);
+
+		for (j = 0; j < 1000; j++) {
+			val = tr32(GRC_EEPROM_ADDR);
+
+			if (val & EEPROM_ADDR_COMPLETE)
+				break;
+			msleep(1);
+		}
+		if (!(val & EEPROM_ADDR_COMPLETE)) {
+			rc = -EBUSY;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
+		u8 *buf)
+{
+	int ret = 0;
+	u32 pagesize = tp->nvram_pagesize;
+	u32 pagemask = pagesize - 1;
+	u32 nvram_cmd;
+	u8 *tmp;
+
+	tmp = kmalloc(pagesize, GFP_KERNEL);
+	if (tmp == NULL)
+		return -ENOMEM;
+
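+	/* Unbuffered flash is written a full page at a time: read back
+	 * the page spanning the target range, merge in the new data,
+	 * erase the page, then rewrite it word by word.
+	 */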
+	while (len) {
+		int j;
+		u32 phy_addr, page_off, size;
+
+		phy_addr = offset & ~pagemask;
+
+		for (j = 0; j < pagesize; j += 4) {
+			ret = tg3_nvram_read_be32(tp, phy_addr + j,
+						  (__be32 *) (tmp + j));
+			if (ret)
+				break;
+		}
+		if (ret)
+			break;
+
+		page_off = offset & pagemask;
+		size = pagesize;
+		if (len < size)
+			size = len;
+
+		len -= size;
+
+		memcpy(tmp + page_off, buf, size);
+
+		offset = offset + (pagesize - page_off);
+
+		tg3_enable_nvram_access(tp);
+
+		/*
+		 * Before we can erase the flash page, we need
+		 * to issue a special "write enable" command.
+		 */
+		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		/* Erase the target page */
+		tw32(NVRAM_ADDR, phy_addr);
+
+		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
+			NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		/* Issue another write enable to start the write. */
+		nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+
+		if (tg3_nvram_exec_cmd(tp, nvram_cmd))
+			break;
+
+		for (j = 0; j < pagesize; j += 4) {
+			__be32 data;
+
+			data = *((__be32 *) (tmp + j));
+
+			tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+			tw32(NVRAM_ADDR, phy_addr + j);
+
+			nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
+				NVRAM_CMD_WR;
+
+			if (j == 0)
+				nvram_cmd |= NVRAM_CMD_FIRST;
+			else if (j == (pagesize - 4))
+				nvram_cmd |= NVRAM_CMD_LAST;
+
+			if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+				break;
+		}
+		if (ret)
+			break;
+	}
+
+	nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
+	tg3_nvram_exec_cmd(tp, nvram_cmd);
+
+	kfree(tmp);
+
+	return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
+		u8 *buf)
+{
+	int i, ret = 0;
+
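+	/* Buffered parts accept word-at-a-time writes; NVRAM_CMD_FIRST and
+	 * NVRAM_CMD_LAST bracket each flash page (and, for EEPROMs, every
+	 * word) so the controller knows when to commit the buffer.
+	 */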
+	for (i = 0; i < len; i += 4, offset += 4) {
+		u32 page_off, phy_addr, nvram_cmd;
+		__be32 data;
+
+		memcpy(&data, buf + i, 4);
+		tw32(NVRAM_WRDATA, be32_to_cpu(data));
+
+		page_off = offset % tp->nvram_pagesize;
+
+		phy_addr = tg3_nvram_phys_addr(tp, offset);
+
+		tw32(NVRAM_ADDR, phy_addr);
+
+		nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
+
+		if (page_off == 0 || i == 0)
+			nvram_cmd |= NVRAM_CMD_FIRST;
+		if (page_off == (tp->nvram_pagesize - 4))
+			nvram_cmd |= NVRAM_CMD_LAST;
+
+		if (i == (len - 4))
+			nvram_cmd |= NVRAM_CMD_LAST;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
+		    !tg3_flag(tp, 5755_PLUS) &&
+		    (tp->nvram_jedecnum == JEDEC_ST) &&
+		    (nvram_cmd & NVRAM_CMD_FIRST)) {
+			if ((ret = tg3_nvram_exec_cmd(tp,
+				NVRAM_CMD_WREN | NVRAM_CMD_GO |
+				NVRAM_CMD_DONE)))
+				break;
+		}
+		if (!tg3_flag(tp, FLASH)) {
+			/* We always do complete word writes to eeprom. */
+			nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
+		}
+
+		if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
+			break;
+	}
+	return ret;
+}
+
+/* offset and length are dword aligned */
+static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
+{
+	int ret;
+
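+	/* On boards with EEPROM write protection, drop GPIO1 for the
+	 * duration of the update and restore it afterwards.
+	 */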
+	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
+		       ~GRC_LCLCTRL_GPIO_OUTPUT1);
+		udelay(40);
+	}
+
+	if (!tg3_flag(tp, NVRAM)) {
+		ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
+	} else {
+		u32 grc_mode;
+
+		ret = tg3_nvram_lock(tp);
+		if (ret)
+			return ret;
+
+		tg3_enable_nvram_access(tp);
+		if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
+			tw32(NVRAM_WRITE1, 0x406);
+
+		grc_mode = tr32(GRC_MODE);
+		tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
+
+		if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
+			ret = tg3_nvram_write_block_buffered(tp, offset, len,
+				buf);
+		} else {
+			ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
+				buf);
+		}
+
+		grc_mode = tr32(GRC_MODE);
+		tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
+
+		tg3_disable_nvram_access(tp);
+		tg3_nvram_unlock(tp);
+	}
+
+	if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
+		tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
+		udelay(40);
+	}
+
+	return ret;
+}
+
+struct subsys_tbl_ent {
+	u16 subsys_vendor, subsys_devid;
+	u32 phy_id;
+};
+
+static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
+	/* Broadcom boards. */
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
+	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
+	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
+
+	/* 3com boards. */
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_3COM,
+	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
+
+	/* DELL boards. */
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
+	{ TG3PCI_SUBVENDOR_ID_DELL,
+	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
+
+	/* Compaq boards. */
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
+	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
+	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
+
+	/* IBM boards. */
+	{ TG3PCI_SUBVENDOR_ID_IBM,
+	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
+};
+
+static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
+		if ((subsys_id_to_phy_id[i].subsys_vendor ==
+		     tp->pdev->subsystem_vendor) &&
+		    (subsys_id_to_phy_id[i].subsys_devid ==
+		     tp->pdev->subsystem_device))
+			return &subsys_id_to_phy_id[i];
+	}
+	return NULL;
+}
+
+static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
+{
+	u32 val;
+
+	tp->phy_id = TG3_PHY_ID_INVALID;
+	tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+	/* Assume an onboard device and WOL capable by default.  */
+	tg3_flag_set(tp, EEPROM_WRITE_PROT);
+	tg3_flag_set(tp, WOL_CAP);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
+			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+			tg3_flag_set(tp, IS_NIC);
+		}
+		val = tr32(VCPU_CFGSHDW);
+		if (val & VCPU_CFGSHDW_ASPM_DBNC)
+			tg3_flag_set(tp, ASPM_WORKAROUND);
+		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
+		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
+			tg3_flag_set(tp, WOL_ENABLE);
+			device_set_wakeup_enable(&tp->pdev->dev, true);
+		}
+		goto done;
+	}
+
+	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
+	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
+		u32 nic_cfg, led_cfg;
+		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
+		int eeprom_phy_serdes = 0;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
+		tp->nic_sram_data_cfg = nic_cfg;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
+		ver >>= NIC_SRAM_DATA_VER_SHIFT;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
+		    (ver > 0) && (ver < 0x100))
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
+
+		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
+		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
+			eeprom_phy_serdes = 1;
+
+		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
+		if (nic_phy_id != 0) {
+			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
+			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
+
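+			/* Repack the two SRAM ID words into the driver's
+			 * internal PHY ID layout so the result compares
+			 * directly against the TG3_PHY_ID_* constants.
+			 */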
+			eeprom_phy_id  = (id1 >> 16) << 10;
+			eeprom_phy_id |= (id2 & 0xfc00) << 16;
+			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
+		} else
+			eeprom_phy_id = 0;
+
+		tp->phy_id = eeprom_phy_id;
+		if (eeprom_phy_serdes) {
+			if (!tg3_flag(tp, 5705_PLUS))
+				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+			else
+				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
+		}
+
+		if (tg3_flag(tp, 5750_PLUS))
+			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
+				    SHASTA_EXT_LED_MODE_MASK);
+		else
+			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
+
+		switch (led_cfg) {
+		default:
+		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
+			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+			break;
+
+		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
+			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+			break;
+
+		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
+			tp->led_ctrl = LED_CTRL_MODE_MAC;
+
+			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
+			 * read, as can happen with some older
+			 * 5700/5701 bootcode.
+			 */
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5700 ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5701)
+				tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+			break;
+
+		case SHASTA_EXT_LED_SHARED:
+			tp->led_ctrl = LED_CTRL_MODE_SHARED;
+			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
+			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
+				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+						 LED_CTRL_MODE_PHY_2);
+			break;
+
+		case SHASTA_EXT_LED_MAC:
+			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
+			break;
+
+		case SHASTA_EXT_LED_COMBO:
+			tp->led_ctrl = LED_CTRL_MODE_COMBO;
+			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
+				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
+						 LED_CTRL_MODE_PHY_2);
+			break;
+
+		}
+
+		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
+		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
+
+		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
+			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
+
+		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
+			tg3_flag_set(tp, EEPROM_WRITE_PROT);
+			if ((tp->pdev->subsystem_vendor ==
+			     PCI_VENDOR_ID_ARIMA) &&
+			    (tp->pdev->subsystem_device == 0x205a ||
+			     tp->pdev->subsystem_device == 0x2063))
+				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+		} else {
+			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
+			tg3_flag_set(tp, IS_NIC);
+		}
+
+		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
+			tg3_flag_set(tp, ENABLE_ASF);
+			if (tg3_flag(tp, 5750_PLUS))
+				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
+		}
+
+		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
+		    tg3_flag(tp, 5750_PLUS))
+			tg3_flag_set(tp, ENABLE_APE);
+
+		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
+		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
+			tg3_flag_clear(tp, WOL_CAP);
+
+		if (tg3_flag(tp, WOL_CAP) &&
+		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
+			tg3_flag_set(tp, WOL_ENABLE);
+			device_set_wakeup_enable(&tp->pdev->dev, true);
+		}
+
+		if (cfg2 & (1 << 17))
+			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
+
+		/* Serdes signal pre-emphasis in register 0x590 is set by
+		 * the bootcode if bit 18 is set.
+		 */
+		if (cfg2 & (1 << 18))
+			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
+
+		if ((tg3_flag(tp, 57765_PLUS) ||
+		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
+			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
+
+		if (tg3_flag(tp, PCI_EXPRESS) &&
+		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+		    !tg3_flag(tp, 57765_PLUS)) {
+			u32 cfg3;
+
+			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
+			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
+				tg3_flag_set(tp, ASPM_WORKAROUND);
+		}
+
+		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
+			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
+		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
+			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
+		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
+			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
+	}
+done:
+	if (tg3_flag(tp, WOL_CAP))
+		device_set_wakeup_enable(&tp->pdev->dev,
+					 tg3_flag(tp, WOL_ENABLE));
+	else
+		device_set_wakeup_capable(&tp->pdev->dev, false);
+}
+
+static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
+{
+	int i;
+	u32 val;
+
+	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
+	tw32(OTP_CTRL, cmd);
+
+	/* Wait for up to 1 ms for command to execute. */
+	for (i = 0; i < 100; i++) {
+		val = tr32(OTP_STATUS);
+		if (val & OTP_STATUS_CMD_DONE)
+			break;
+		udelay(10);
+	}
+
+	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
+}
+
+/* Read the gphy configuration from the OTP region of the chip.  The gphy
+ * configuration is a 32-bit value that straddles the alignment boundary.
+ * We do two 32-bit reads and then shift and merge the results.
+ */
+static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
+{
+	u32 bhalf_otp, thalf_otp;
+
+	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
+		return 0;
+
+	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+		return 0;
+
+	thalf_otp = tr32(OTP_READ_DATA);
+
+	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
+
+	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
+		return 0;
+
+	bhalf_otp = tr32(OTP_READ_DATA);
+
+	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
+}
+
+static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
+{
+	u32 adv = ADVERTISED_Autoneg |
+		  ADVERTISED_Pause;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
+		adv |= ADVERTISED_1000baseT_Half |
+		       ADVERTISED_1000baseT_Full;
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		adv |= ADVERTISED_100baseT_Half |
+		       ADVERTISED_100baseT_Full |
+		       ADVERTISED_10baseT_Half |
+		       ADVERTISED_10baseT_Full |
+		       ADVERTISED_TP;
+	else
+		adv |= ADVERTISED_FIBRE;
+
+	tp->link_config.advertising = adv;
+	tp->link_config.speed = SPEED_INVALID;
+	tp->link_config.duplex = DUPLEX_INVALID;
+	tp->link_config.autoneg = AUTONEG_ENABLE;
+	tp->link_config.active_speed = SPEED_INVALID;
+	tp->link_config.active_duplex = DUPLEX_INVALID;
+	tp->link_config.orig_speed = SPEED_INVALID;
+	tp->link_config.orig_duplex = DUPLEX_INVALID;
+	tp->link_config.orig_autoneg = AUTONEG_INVALID;
+}
+
+static int __devinit tg3_phy_probe(struct tg3 *tp)
+{
+	u32 hw_phy_id_1, hw_phy_id_2;
+	u32 hw_phy_id, hw_phy_id_masked;
+	int err;
+
+	/* flow control autonegotiation is default behavior */
+	tg3_flag_set(tp, PAUSE_AUTONEG);
+	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
+
+	if (tg3_flag(tp, USE_PHYLIB))
+		return tg3_phy_init(tp);
+
+	/* Reading the PHY ID register can conflict with ASF
+	 * firmware access to the PHY hardware.
+	 */
+	err = 0;
+	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
+		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
+	} else {
+		/* Now read the physical PHY_ID from the chip and verify
+		 * that it is sane.  If it doesn't look good, we fall back
+		 * to the PHY_ID found in the eeprom area, and failing
+		 * that, to the hard-coded subsystem-ID table.
+		 */
+		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
+		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
+
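+		/* Fold MII_PHYSID1/2 into the same internal PHY ID
+		 * layout used by the eeprom path and the subsystem-ID
+		 * table.
+		 */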
+		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
+		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
+		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
+
+		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
+	}
+
+	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
+		tp->phy_id = hw_phy_id;
+		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
+			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+		else
+			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
+	} else {
+		if (tp->phy_id != TG3_PHY_ID_INVALID) {
+			/* Do nothing, phy ID already set up in
+			 * tg3_get_eeprom_hw_cfg().
+			 */
+		} else {
+			struct subsys_tbl_ent *p;
+
+			/* No eeprom signature?  Try the hardcoded
+			 * subsys device table.
+			 */
+			p = tg3_lookup_by_subsys(tp);
+			if (!p)
+				return -ENODEV;
+
+			tp->phy_id = p->phy_id;
+			if (!tp->phy_id ||
+			    tp->phy_id == TG3_PHY_ID_BCM8002)
+				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
+		}
+	}
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
+	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
+	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
+	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
+		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
+
+	tg3_phy_init_link_config(tp);
+
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+	    !tg3_flag(tp, ENABLE_APE) &&
+	    !tg3_flag(tp, ENABLE_ASF)) {
+		u32 bmsr, mask;
+
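+		/* MII_BMSR latches link-down events, so read it twice;
+		 * the second read reflects the current link state.
+		 */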
+		tg3_readphy(tp, MII_BMSR, &bmsr);
+		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
+		    (bmsr & BMSR_LSTATUS))
+			goto skip_phy_reset;
+
+		err = tg3_phy_reset(tp);
+		if (err)
+			return err;
+
+		tg3_phy_set_wirespeed(tp);
+
+		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
+			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
+			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
+		if (!tg3_copper_is_advertising_all(tp, mask)) {
+			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
+					    tp->link_config.flowctrl);
+
+			tg3_writephy(tp, MII_BMCR,
+				     BMCR_ANENABLE | BMCR_ANRESTART);
+		}
+	}
+
+skip_phy_reset:
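+	/* The 5401 DSP init is attempted a second time if the first
+	 * try fails, presumably to ride out a transient failure.
+	 */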
+	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
+		err = tg3_init_5401phy_dsp(tp);
+		if (err)
+			return err;
+
+		err = tg3_init_5401phy_dsp(tp);
+	}
+
+	return err;
+}
+
+static void __devinit tg3_read_vpd(struct tg3 *tp)
+{
+	u8 *vpd_data;
+	unsigned int block_end, rosize, len;
+	u32 vpdlen;
+	int j, i = 0;
+
+	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
+	if (!vpd_data)
+		goto out_no_vpd;
+
+	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
+	if (i < 0)
+		goto out_not_found;
+
+	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
+	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
+	i += PCI_VPD_LRDT_TAG_SIZE;
+
+	if (block_end > vpdlen)
+		goto out_not_found;
+
+	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_MFR_ID);
+	if (j > 0) {
+		len = pci_vpd_info_field_size(&vpd_data[j]);
+
+		j += PCI_VPD_INFO_FLD_HDR_SIZE;
+		if (j + len > block_end || len != 4 ||
+		    memcmp(&vpd_data[j], "1028", 4))
+			goto partno;
+
+		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+					      PCI_VPD_RO_KEYWORD_VENDOR0);
+		if (j < 0)
+			goto partno;
+
+		len = pci_vpd_info_field_size(&vpd_data[j]);
+
+		j += PCI_VPD_INFO_FLD_HDR_SIZE;
+		if (j + len > block_end)
+			goto partno;
+
+		memcpy(tp->fw_ver, &vpd_data[j], len);
+		strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
+	}
+
+partno:
+	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+				      PCI_VPD_RO_KEYWORD_PARTNO);
+	if (i < 0)
+		goto out_not_found;
+
+	len = pci_vpd_info_field_size(&vpd_data[i]);
+
+	i += PCI_VPD_INFO_FLD_HDR_SIZE;
+	if (len > TG3_BPN_SIZE ||
+	    (len + i) > vpdlen)
+		goto out_not_found;
+
+	memcpy(tp->board_part_number, &vpd_data[i], len);
+
+out_not_found:
+	kfree(vpd_data);
+	if (tp->board_part_number[0])
+		return;
+
+out_no_vpd:
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
+			strcpy(tp->board_part_number, "BCM5717");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
+			strcpy(tp->board_part_number, "BCM5718");
+		else
+			goto nomatch;
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
+			strcpy(tp->board_part_number, "BCM57780");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
+			strcpy(tp->board_part_number, "BCM57760");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
+			strcpy(tp->board_part_number, "BCM57790");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
+			strcpy(tp->board_part_number, "BCM57788");
+		else
+			goto nomatch;
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
+			strcpy(tp->board_part_number, "BCM57761");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
+			strcpy(tp->board_part_number, "BCM57765");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
+			strcpy(tp->board_part_number, "BCM57781");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
+			strcpy(tp->board_part_number, "BCM57785");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
+			strcpy(tp->board_part_number, "BCM57791");
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+			strcpy(tp->board_part_number, "BCM57795");
+		else
+			goto nomatch;
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		strcpy(tp->board_part_number, "BCM95906");
+	} else {
+nomatch:
+		strcpy(tp->board_part_number, "none");
+	}
+}
+
+static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
+{
+	u32 val;
+
+	if (tg3_nvram_read(tp, offset, &val) ||
+	    (val & 0xfc000000) != 0x0c000000 ||
+	    tg3_nvram_read(tp, offset + 4, &val) ||
+	    val != 0)
+		return 0;
+
+	return 1;
+}
+
+static void __devinit tg3_read_bc_ver(struct tg3 *tp)
+{
+	u32 val, offset, start, ver_offset;
+	int i, dst_off;
+	bool newver = false;
+
+	if (tg3_nvram_read(tp, 0xc, &offset) ||
+	    tg3_nvram_read(tp, 0x4, &start))
+		return;
+
+	offset = tg3_nvram_logical_addr(tp, offset);
+
+	if (tg3_nvram_read(tp, offset, &val))
+		return;
+
+	if ((val & 0xfc000000) == 0x0c000000) {
+		if (tg3_nvram_read(tp, offset + 4, &val))
+			return;
+
+		if (val == 0)
+			newver = true;
+	}
+
+	dst_off = strlen(tp->fw_ver);
+
+	if (newver) {
+		if (TG3_VER_SIZE - dst_off < 16 ||
+		    tg3_nvram_read(tp, offset + 8, &ver_offset))
+			return;
+
+		offset = offset + ver_offset - start;
+		for (i = 0; i < 16; i += 4) {
+			__be32 v;
+			if (tg3_nvram_read_be32(tp, offset + i, &v))
+				return;
+
+			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
+		}
+	} else {
+		u32 major, minor;
+
+		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
+			return;
+
+		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
+			TG3_NVM_BCVER_MAJSFT;
+		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
+		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
+			 "v%d.%02d", major, minor);
+	}
+}
+
+static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
+{
+	u32 val, major, minor;
+
+	/* Use native endian representation */
+	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
+		return;
+
+	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
+		TG3_NVM_HWSB_CFG1_MAJSFT;
+	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
+		TG3_NVM_HWSB_CFG1_MINSFT;
+
+	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
+}
+
+static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
+{
+	u32 offset, major, minor, build;
+
+	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
+
+	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
+		return;
+
+	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
+	case TG3_EEPROM_SB_REVISION_0:
+		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_2:
+		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_3:
+		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_4:
+		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_5:
+		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
+		break;
+	case TG3_EEPROM_SB_REVISION_6:
+		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
+		break;
+	default:
+		return;
+	}
+
+	if (tg3_nvram_read(tp, offset, &val))
+		return;
+
+	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
+		TG3_EEPROM_SB_EDH_BLD_SHFT;
+	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
+		TG3_EEPROM_SB_EDH_MAJ_SHFT;
+	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
+
+	if (minor > 99 || build > 26)
+		return;
+
+	offset = strlen(tp->fw_ver);
+	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
+		 " v%d.%02d", major, minor);
+
+	if (build > 0) {
+		offset = strlen(tp->fw_ver);
+		if (offset < TG3_VER_SIZE - 1)
+			tp->fw_ver[offset] = 'a' + build - 1;
+	}
+}
+
+static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
+{
+	u32 val, offset, start;
+	int i, vlen;
+
+	for (offset = TG3_NVM_DIR_START;
+	     offset < TG3_NVM_DIR_END;
+	     offset += TG3_NVM_DIRENT_SIZE) {
+		if (tg3_nvram_read(tp, offset, &val))
+			return;
+
+		if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
+			break;
+	}
+
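+	/* The loop ran off the end of the directory without finding
+	 * an ASF_INIT entry.
+	 */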
+	if (offset == TG3_NVM_DIR_END)
+		return;
+
+	if (!tg3_flag(tp, 5705_PLUS))
+		start = 0x08000000;
+	else if (tg3_nvram_read(tp, offset - 4, &start))
+		return;
+
+	if (tg3_nvram_read(tp, offset + 4, &offset) ||
+	    !tg3_fw_img_is_valid(tp, offset) ||
+	    tg3_nvram_read(tp, offset + 8, &val))
+		return;
+
+	offset += val - start;
+
+	vlen = strlen(tp->fw_ver);
+
+	tp->fw_ver[vlen++] = ',';
+	tp->fw_ver[vlen++] = ' ';
+
+	for (i = 0; i < 4; i++) {
+		__be32 v;
+		if (tg3_nvram_read_be32(tp, offset, &v))
+			return;
+
+		offset += sizeof(v);
+
+		if (vlen > TG3_VER_SIZE - sizeof(v)) {
+			memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
+			break;
+		}
+
+		memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
+		vlen += sizeof(v);
+	}
+}
+
+static void __devinit tg3_read_dash_ver(struct tg3 *tp)
+{
+	int vlen;
+	u32 apedata;
+	char *fwtype;
+
+	if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
+		return;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
+	if (apedata != APE_SEG_SIG_MAGIC)
+		return;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
+	if (!(apedata & APE_FW_STATUS_READY))
+		return;
+
+	apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
+
+	if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
+		tg3_flag_set(tp, APE_HAS_NCSI);
+		fwtype = "NCSI";
+	} else {
+		fwtype = "DASH";
+	}
+
+	vlen = strlen(tp->fw_ver);
+
+	snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
+		 fwtype,
+		 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
+		 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
+		 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
+		 (apedata & APE_FW_VERSION_BLDMSK));
+}
+
+static void __devinit tg3_read_fw_ver(struct tg3 *tp)
+{
+	u32 val;
+	bool vpd_vers = false;
+
+	if (tp->fw_ver[0] != 0)
+		vpd_vers = true;
+
+	if (tg3_flag(tp, NO_NVRAM)) {
+		strcat(tp->fw_ver, "sb");
+		return;
+	}
+
+	if (tg3_nvram_read(tp, 0, &val))
+		return;
+
+	if (val == TG3_EEPROM_MAGIC)
+		tg3_read_bc_ver(tp);
+	else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
+		tg3_read_sb_ver(tp, val);
+	else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
+		tg3_read_hwsb_ver(tp);
+	else
+		return;
+
+	if (vpd_vers)
+		goto done;
+
+	if (tg3_flag(tp, ENABLE_APE)) {
+		if (tg3_flag(tp, ENABLE_ASF))
+			tg3_read_dash_ver(tp);
+	} else if (tg3_flag(tp, ENABLE_ASF)) {
+		tg3_read_mgmtfw_ver(tp);
+	}
+
+done:
+	tp->fw_ver[TG3_VER_SIZE - 1] = 0;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
+
+static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
+{
+	if (tg3_flag(tp, LRG_PROD_RING_CAP))
+		return TG3_RX_RET_MAX_SIZE_5717;
+	else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
+		return TG3_RX_RET_MAX_SIZE_5700;
+	else
+		return TG3_RX_RET_MAX_SIZE_5705;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
+	{ },
+};
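+
+/* pci_dev_present() on this table is used in tg3_get_invariants() below:
+ * if any of these host bridges is present, mailbox writes must be
+ * flushed with a read back (MBOX_WRITE_REORDER).
+ */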
+
+static int __devinit tg3_get_invariants(struct tg3 *tp)
+{
+	u32 misc_ctrl_reg;
+	u32 pci_state_reg, grc_misc_cfg;
+	u32 val;
+	u16 pci_cmd;
+	int err;
+
+	/* Force memory write invalidate off.  If we leave it on,
+	 * then on 5700_BX chips we have to enable a workaround.
+	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
+	 * to match the cacheline size.  The Broadcom driver has this
+	 * workaround but turns MWI off at all times, so it never uses
+	 * it.  This seems to suggest that the workaround is insufficient.
+	 */
+	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+	pci_cmd &= ~PCI_COMMAND_INVALIDATE;
+	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+
+	/* Important! -- Make sure register accesses are byteswapped
+	 * correctly.  Also, for those chips that require it, make
+	 * sure that indirect register accesses are enabled before
+	 * the first operation.
+	 */
+	pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+			      &misc_ctrl_reg);
+	tp->misc_host_ctrl |= (misc_ctrl_reg &
+			       MISC_HOST_CTRL_CHIPREV);
+	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+			       tp->misc_host_ctrl);
+
+	tp->pci_chip_rev_id = (misc_ctrl_reg >>
+			       MISC_HOST_CTRL_CHIPREV_SHIFT);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
+		u32 prod_id_asic_rev;
+
+		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
+			pci_read_config_dword(tp->pdev,
+					      TG3PCI_GEN2_PRODID_ASICREV,
+					      &prod_id_asic_rev);
+		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+			 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+			pci_read_config_dword(tp->pdev,
+					      TG3PCI_GEN15_PRODID_ASICREV,
+					      &prod_id_asic_rev);
+		else
+			pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
+					      &prod_id_asic_rev);
+
+		tp->pci_chip_rev_id = prod_id_asic_rev;
+	}
+
+	/* Wrong chip ID in 5752 A0. This code can be removed later
+	 * as A0 is not in production.
+	 */
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
+		tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
+
+	/* If we have 5702/03 A1 or A2 on certain ICH chipsets,
+	 * we need to disable memory and use config. cycles
+	 * only to access all registers. The 5702/03 chips
+	 * can mistakenly decode the special cycles from the
+	 * ICH chipsets as memory write cycles, causing corruption
+	 * of register and memory space. Only certain ICH bridges
+	 * will drive special cycles with non-zero data during the
+	 * address phase which can fall within the 5703's address
+	 * range. This is not an ICH bug as the PCI spec allows
+	 * non-zero address during special cycles. However, only
+	 * these ICH bridges are known to drive non-zero addresses
+	 * during special cycles.
+	 *
+	 * Since special cycles do not cross PCI bridges, we only
+	 * enable this workaround if the 5703 is on the secondary
+	 * bus of these ICH bridges.
+	 */
+	if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
+	    (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
+		static struct tg3_dev_id {
+			u32	vendor;
+			u32	device;
+			u32	rev;
+		} ich_chipsets[] = {
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
+			  PCI_ANY_ID },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
+			  PCI_ANY_ID },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
+			  0xa },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
+			  PCI_ANY_ID },
+			{ },
+		};
+		struct tg3_dev_id *pci_id = &ich_chipsets[0];
+		struct pci_dev *bridge = NULL;
+
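+		/* pci_get_device() drops the reference on its 'from'
+		 * argument, so iterating with the previously returned
+		 * bridge does not leak a reference.
+		 */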
+		while (pci_id->vendor != 0) {
+			bridge = pci_get_device(pci_id->vendor, pci_id->device,
+						bridge);
+			if (!bridge) {
+				pci_id++;
+				continue;
+			}
+			if (pci_id->rev != PCI_ANY_ID) {
+				if (bridge->revision > pci_id->rev)
+					continue;
+			}
+			if (bridge->subordinate &&
+			    (bridge->subordinate->number ==
+			     tp->pdev->bus->number)) {
+				tg3_flag_set(tp, ICH_WORKAROUND);
+				pci_dev_put(bridge);
+				break;
+			}
+		}
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+		static struct tg3_dev_id {
+			u32	vendor;
+			u32	device;
+		} bridge_chipsets[] = {
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
+			{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
+			{ },
+		};
+		struct tg3_dev_id *pci_id = &bridge_chipsets[0];
+		struct pci_dev *bridge = NULL;
+
+		while (pci_id->vendor != 0) {
+			bridge = pci_get_device(pci_id->vendor,
+						pci_id->device,
+						bridge);
+			if (!bridge) {
+				pci_id++;
+				continue;
+			}
+			if (bridge->subordinate &&
+			    (bridge->subordinate->number <=
+			     tp->pdev->bus->number) &&
+			    (bridge->subordinate->subordinate >=
+			     tp->pdev->bus->number)) {
+				tg3_flag_set(tp, 5701_DMA_BUG);
+				pci_dev_put(bridge);
+				break;
+			}
+		}
+	}
+
+	/* The EPB bridge inside 5714, 5715, and 5780 cannot support
+	 * DMA addresses > 40-bit. This bridge may have other additional
+	 * 57xx devices behind it in some 4-port NIC designs for example.
+	 * Any tg3 device found behind the bridge will also need the 40-bit
+	 * DMA workaround.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+		tg3_flag_set(tp, 5780_CLASS);
+		tg3_flag_set(tp, 40BIT_DMA_BUG);
+		tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
+	} else {
+		struct pci_dev *bridge = NULL;
+
+		do {
+			bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+						PCI_DEVICE_ID_SERVERWORKS_EPB,
+						bridge);
+			if (bridge && bridge->subordinate &&
+			    (bridge->subordinate->number <=
+			     tp->pdev->bus->number) &&
+			    (bridge->subordinate->subordinate >=
+			     tp->pdev->bus->number)) {
+				tg3_flag_set(tp, 40BIT_DMA_BUG);
+				pci_dev_put(bridge);
+				break;
+			}
+		} while (bridge);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
+		tp->pdev_peer = tg3_find_peer(tp);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+		tg3_flag_set(tp, 5717_PLUS);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
+	    tg3_flag(tp, 5717_PLUS))
+		tg3_flag_set(tp, 57765_PLUS);
+
+	/* Intentionally exclude ASIC_REV_5906 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+	    tg3_flag(tp, 57765_PLUS))
+		tg3_flag_set(tp, 5755_PLUS);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
+	    tg3_flag(tp, 5755_PLUS) ||
+	    tg3_flag(tp, 5780_CLASS))
+		tg3_flag_set(tp, 5750_PLUS);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+	    tg3_flag(tp, 5750_PLUS))
+		tg3_flag_set(tp, 5705_PLUS);
+
+	/* Determine TSO capabilities */
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
+		; /* Do nothing. HW bug. */
+	else if (tg3_flag(tp, 57765_PLUS))
+		tg3_flag_set(tp, HW_TSO_3);
+	else if (tg3_flag(tp, 5755_PLUS) ||
+		 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tg3_flag_set(tp, HW_TSO_2);
+	else if (tg3_flag(tp, 5750_PLUS)) {
+		tg3_flag_set(tp, HW_TSO_1);
+		tg3_flag_set(tp, TSO_BUG);
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
+		    tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
+			tg3_flag_clear(tp, TSO_BUG);
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+		   GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+		   tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
+		tg3_flag_set(tp, TSO_BUG);
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
+			tp->fw_needed = FIRMWARE_TG3TSO5;
+		else
+			tp->fw_needed = FIRMWARE_TG3TSO;
+	}
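+	/* Chips without one of the HW_TSO_* engines fall back to the
+	 * firmware-based TSO images selected via tp->fw_needed above.
+	 */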
+
+	/* Selectively allow TSO based on operating conditions */
+	if (tg3_flag(tp, HW_TSO_1) ||
+	    tg3_flag(tp, HW_TSO_2) ||
+	    tg3_flag(tp, HW_TSO_3) ||
+	    (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+		tg3_flag_set(tp, TSO_CAPABLE);
+	else {
+		tg3_flag_clear(tp, TSO_CAPABLE);
+		tg3_flag_clear(tp, TSO_BUG);
+		tp->fw_needed = NULL;
+	}
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
+		tp->fw_needed = FIRMWARE_TG3;
+
+	tp->irq_max = 1;
+
+	if (tg3_flag(tp, 5750_PLUS)) {
+		tg3_flag_set(tp, SUPPORT_MSI);
+		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
+		    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
+		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
+		     tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
+		     tp->pdev_peer == tp->pdev))
+			tg3_flag_clear(tp, SUPPORT_MSI);
+
+		if (tg3_flag(tp, 5755_PLUS) ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+			tg3_flag_set(tp, 1SHOT_MSI);
+		}
+
+		if (tg3_flag(tp, 57765_PLUS)) {
+			tg3_flag_set(tp, SUPPORT_MSIX);
+			tp->irq_max = TG3_IRQ_MAX_VECS;
+		}
+	}
+
+	if (tg3_flag(tp, 5755_PLUS))
+		tg3_flag_set(tp, SHORT_DMA_BUG);
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+		tg3_flag_set(tp, 4K_FIFO_LIMIT);
+
+	if (tg3_flag(tp, 5717_PLUS))
+		tg3_flag_set(tp, LRG_PROD_RING_CAP);
+
+	if (tg3_flag(tp, 57765_PLUS) &&
+	    tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
+		tg3_flag_set(tp, USE_JUMBO_BDFLAG);
+
+	if (!tg3_flag(tp, 5705_PLUS) ||
+	    tg3_flag(tp, 5780_CLASS) ||
+	    tg3_flag(tp, USE_JUMBO_BDFLAG))
+		tg3_flag_set(tp, JUMBO_CAPABLE);
+
+	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+			      &pci_state_reg);
+
+	if (pci_is_pcie(tp->pdev)) {
+		u16 lnkctl;
+
+		tg3_flag_set(tp, PCI_EXPRESS);
+
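+		/* 5719/5720 are restricted to a 2048-byte maximum read
+		 * request size; other PCIe chips use the 4096-byte default.
+		 */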
+		tp->pcie_readrq = 4096;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+			tp->pcie_readrq = 2048;
+
+		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+
+		pci_read_config_word(tp->pdev,
+				     pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
+				     &lnkctl);
+		if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
+			    ASIC_REV_5906) {
+				tg3_flag_clear(tp, HW_TSO_2);
+				tg3_flag_clear(tp, TSO_CAPABLE);
+			}
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
+			    tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
+				tg3_flag_set(tp, CLKREQ_BUG);
+		} else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+			tg3_flag_set(tp, L1PLLPD_EN);
+		}
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
+		/* BCM5785 devices are effectively PCIe devices, and should
+		 * follow PCIe codepaths, but do not have a PCIe capabilities
+		 * section.
+		 */
+		tg3_flag_set(tp, PCI_EXPRESS);
+	} else if (!tg3_flag(tp, 5705_PLUS) ||
+		   tg3_flag(tp, 5780_CLASS)) {
+		tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
+		if (!tp->pcix_cap) {
+			dev_err(&tp->pdev->dev,
+				"Cannot find PCI-X capability, aborting\n");
+			return -EIO;
+		}
+
+		if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
+			tg3_flag_set(tp, PCIX_MODE);
+	}
+
+	/* If we have an AMD 762 or VIA K8T800 chipset, write
+	 * reordering to the mailbox registers done by the host
+	 * controller can cause major trouble.  We read back from
+	 * every mailbox register write to force the writes to be
+	 * posted to the chip in order.
+	 */
+	if (pci_dev_present(tg3_write_reorder_chipsets) &&
+	    !tg3_flag(tp, PCI_EXPRESS))
+		tg3_flag_set(tp, MBOX_WRITE_REORDER);
+
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+			     &tp->pci_cacheline_sz);
+	pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+			     &tp->pci_lat_timer);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+	    tp->pci_lat_timer < 64) {
+		tp->pci_lat_timer = 64;
+		pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+				      tp->pci_lat_timer);
+	}
+
+	/* Important! -- It is critical that the PCI-X hw workaround
+	 * situation is decided before the first MMIO register access.
+	 */
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
+		/* 5700 BX chips need to have their TX producer index
+		 * mailboxes written twice to workaround a bug.
+		 */
+		tg3_flag_set(tp, TXD_MBOX_HWBUG);
+
+		/* If we are in PCI-X mode, enable register write workaround.
+		 *
+		 * The workaround is to use indirect register accesses
+		 * for all chip writes except those to mailbox registers.
+		 */
+		if (tg3_flag(tp, PCIX_MODE)) {
+			u32 pm_reg;
+
+			tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+
+			/* The chip can have its power management PCI config
+			 * space registers clobbered due to this bug.
+			 * So explicitly force the chip into D0 here.
+			 */
+			pci_read_config_dword(tp->pdev,
+					      tp->pm_cap + PCI_PM_CTRL,
+					      &pm_reg);
+			pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
+			pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
+			pci_write_config_dword(tp->pdev,
+					       tp->pm_cap + PCI_PM_CTRL,
+					       pm_reg);
+
+			/* Also, force SERR#/PERR# in PCI command. */
+			pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+			pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
+			pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+		}
+	}
+
+	if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
+		tg3_flag_set(tp, PCI_HIGH_SPEED);
+	if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
+		tg3_flag_set(tp, PCI_32BIT);
+
+	/* Chip-specific fixup from Broadcom driver */
+	if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
+	    (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
+		pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
+		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
+	}
+
+	/* Default fast path register access methods */
+	tp->read32 = tg3_read32;
+	tp->write32 = tg3_write32;
+	tp->read32_mbox = tg3_read32;
+	tp->write32_mbox = tg3_write32;
+	tp->write32_tx_mbox = tg3_write32;
+	tp->write32_rx_mbox = tg3_write32;
+
+	/* Various workaround register access methods */
+	if (tg3_flag(tp, PCIX_TARGET_HWBUG))
+		tp->write32 = tg3_write_indirect_reg32;
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
+		 (tg3_flag(tp, PCI_EXPRESS) &&
+		  tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
+		/*
+		 * Back-to-back register writes can cause problems on these
+		 * chips; the workaround is to read back all register writes
+		 * except those to the mailbox registers.
+		 *
+		 * See tg3_write_indirect_reg32().
+		 */
+		tp->write32 = tg3_write_flush_reg32;
+	}
+
+	if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
+		tp->write32_tx_mbox = tg3_write32_tx_mbox;
+		if (tg3_flag(tp, MBOX_WRITE_REORDER))
+			tp->write32_rx_mbox = tg3_write_flush_reg32;
+	}
+
+	if (tg3_flag(tp, ICH_WORKAROUND)) {
+		tp->read32 = tg3_read_indirect_reg32;
+		tp->write32 = tg3_write_indirect_reg32;
+		tp->read32_mbox = tg3_read_indirect_mbox;
+		tp->write32_mbox = tg3_write_indirect_mbox;
+		tp->write32_tx_mbox = tg3_write_indirect_mbox;
+		tp->write32_rx_mbox = tg3_write_indirect_mbox;
+
+		iounmap(tp->regs);
+		tp->regs = NULL;
+
+		pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
+		pci_cmd &= ~PCI_COMMAND_MEMORY;
+		pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
+	}
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+		tp->read32_mbox = tg3_read32_mbox_5906;
+		tp->write32_mbox = tg3_write32_mbox_5906;
+		tp->write32_tx_mbox = tg3_write32_mbox_5906;
+		tp->write32_rx_mbox = tg3_write32_mbox_5906;
+	}
+
+	if (tp->write32 == tg3_write_indirect_reg32 ||
+	    (tg3_flag(tp, PCIX_MODE) &&
+	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
+		tg3_flag_set(tp, SRAM_USE_CONFIG);
+
+	/* The memory arbiter has to be enabled in order for SRAM accesses
+	 * to succeed.  Normally on powerup the tg3 chip firmware will make
+	 * sure it is enabled, but other entities such as system netboot
+	 * code might disable it.
+	 */
+	val = tr32(MEMARB_MODE);
+	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
+	if (tg3_flag(tp, PCIX_MODE)) {
+		pci_read_config_dword(tp->pdev,
+				      tp->pcix_cap + PCI_X_STATUS, &val);
+		tp->pci_fn = val & 0x7;
+	} else {
+		tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
+	}
+
+	/* Get eeprom hw config before calling tg3_set_power_state().
+	 * In particular, the TG3_FLAG_IS_NIC flag must be
+	 * determined before calling tg3_set_power_state() so that
+	 * we know whether or not to switch out of Vaux power.
+	 * When the flag is set, it means that GPIO1 is used for eeprom
+	 * write protect and also implies that it is a LOM where GPIOs
+	 * are not used to switch power.
+	 */
+	tg3_get_eeprom_hw_cfg(tp);
+
+	if (tg3_flag(tp, ENABLE_APE)) {
+		/* Allow reads and writes to the
+		 * APE register and memory space.
+		 */
+		pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
+				 PCISTATE_ALLOW_APE_SHMEM_WR |
+				 PCISTATE_ALLOW_APE_PSPACE_WR;
+		pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
+				       pci_state_reg);
+
+		tg3_ape_lock_init(tp);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+	    tg3_flag(tp, 57765_PLUS))
+		tg3_flag_set(tp, CPMU_PRESENT);
+
+	/* Set up tp->grc_local_ctrl before calling
+	 * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
+	 * will bring 5700's external PHY out of reset.
+	 * It is also used as eeprom write protect on LOMs.
+	 */
+	tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    tg3_flag(tp, EEPROM_WRITE_PROT))
+		tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
+				       GRC_LCLCTRL_GPIO_OUTPUT1);
+	/* Unused GPIO3 must be driven as output on 5752 because there
+	 * are no pull-up resistors on unused GPIO pins.
+	 */
+	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+
+	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
+		/* Turn off the debug UART. */
+		tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
+		if (tg3_flag(tp, IS_NIC))
+			/* Keep VMain power. */
+			tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
+					      GRC_LCLCTRL_GPIO_OUTPUT0;
+	}
+
+	/* Switch out of Vaux if it is a NIC */
+	tg3_pwrsrc_switch_to_vmain(tp);
+
+	/* Derive initial jumbo mode from MTU assigned in
+	 * ether_setup() via the alloc_etherdev() call
+	 */
+	if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
+		tg3_flag_set(tp, JUMBO_RING_ENABLE);
+
+	/* Determine WakeOnLan speed to use. */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
+		tg3_flag_clear(tp, WOL_SPEED_100MB);
+	} else {
+		tg3_flag_set(tp, WOL_SPEED_100MB);
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		tp->phy_flags |= TG3_PHYFLG_IS_FET;
+
+	/* A few boards don't want Ethernet@WireSpeed phy feature */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
+	     (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
+	    (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
+	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
+		tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
+
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
+	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
+		tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
+		tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
+
+	if (tg3_flag(tp, 5705_PLUS) &&
+	    !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
+	    !tg3_flag(tp, 57765_PLUS)) {
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
+			if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
+			    tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
+				tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
+			if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
+				tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
+		} else
+			tp->phy_flags |= TG3_PHYFLG_BER_BUG;
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
+		tp->phy_otp = tg3_read_otp_phycfg(tp);
+		if (tp->phy_otp == 0)
+			tp->phy_otp = TG3_OTP_DEFAULT;
+	}
+
+	if (tg3_flag(tp, CPMU_PRESENT))
+		tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
+	else
+		tp->mi_mode = MAC_MI_MODE_BASE;
+
+	tp->coalesce_mode = 0;
+	if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
+	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
+		tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
+
+	/* Set these bits to enable statistics workaround. */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
+	    tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
+		tp->coalesce_mode |= HOSTCC_MODE_ATTN;
+		tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+		tg3_flag_set(tp, USE_PHYLIB);
+
+	err = tg3_mdio_init(tp);
+	if (err)
+		return err;
+
+	/* Initialize data/descriptor byte/word swapping. */
+	val = tr32(GRC_MODE);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
+		val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
+			GRC_MODE_WORD_SWAP_B2HRX_DATA |
+			GRC_MODE_B2HRX_ENABLE |
+			GRC_MODE_HTX2B_ENABLE |
+			GRC_MODE_HOST_STACKUP);
+	else
+		val &= GRC_MODE_HOST_STACKUP;
+
+	tw32(GRC_MODE, val | tp->grc_mode);
+
+	tg3_switch_clocks(tp);
+
+	/* Clear this out for sanity. */
+	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
+			      &pci_state_reg);
+	if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
+	    !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
+		u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
+
+		if (chiprevid == CHIPREV_ID_5701_A0 ||
+		    chiprevid == CHIPREV_ID_5701_B0 ||
+		    chiprevid == CHIPREV_ID_5701_B2 ||
+		    chiprevid == CHIPREV_ID_5701_B5) {
+			void __iomem *sram_base;
+
+			/* Write some dummy words into the SRAM status block
+			 * area and see if they read back correctly.  If the
+			 * read-back value is bad, force-enable the PCIX
+			 * workaround.
+			 */
+			sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
+
+			writel(0x00000000, sram_base);
+			writel(0x00000000, sram_base + 4);
+			writel(0xffffffff, sram_base + 4);
+			if (readl(sram_base) != 0x00000000)
+				tg3_flag_set(tp, PCIX_TARGET_HWBUG);
+		}
+	}
+
+	udelay(50);
+	tg3_nvram_init(tp);
+
+	grc_misc_cfg = tr32(GRC_MISC_CFG);
+	grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+	    (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
+	     grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
+		tg3_flag_set(tp, IS_5788);
+
+	if (!tg3_flag(tp, IS_5788) &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
+		tg3_flag_set(tp, TAGGED_STATUS);
+	if (tg3_flag(tp, TAGGED_STATUS)) {
+		tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
+				      HOSTCC_MODE_CLRTICK_TXBD);
+
+		tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
+		pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
+				       tp->misc_host_ctrl);
+	}
+
+	/* Preserve the APE MAC_MODE bits */
+	if (tg3_flag(tp, ENABLE_APE))
+		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+	else
+		tp->mac_mode = TG3_DEF_MAC_MODE;
+
+	/* these are limited to 10/100 only */
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
+	     (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
+	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
+	     tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
+	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
+	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
+	    (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
+	     (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
+	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
+	      tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+	    (tp->phy_flags & TG3_PHYFLG_IS_FET))
+		tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
+
+	err = tg3_phy_probe(tp);
+	if (err) {
+		dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
+		/* ... but do not return immediately ... */
+		tg3_mdio_fini(tp);
+	}
+
+	tg3_read_vpd(tp);
+	tg3_read_fw_ver(tp);
+
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
+		tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+	} else {
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+			tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+		else
+			tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
+	}
+
+	/* 5700 {AX,BX} chips have a broken status block link
+	 * change bit implementation, so we must use the
+	 * status register in those cases.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
+		tg3_flag_set(tp, USE_LINKCHG_REG);
+	else
+		tg3_flag_clear(tp, USE_LINKCHG_REG);
+
+	/* The led_ctrl is set during tg3_phy_probe, here we might
+	 * have to force the link status polling mechanism based
+	 * upon subsystem IDs.
+	 */
+	if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+	    !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
+		tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
+		tg3_flag_set(tp, USE_LINKCHG_REG);
+	}
+
+	/* For all SERDES we poll the MAC status register. */
+	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
+		tg3_flag_set(tp, POLL_SERDES);
+	else
+		tg3_flag_clear(tp, POLL_SERDES);
+
+	tp->rx_offset = NET_IP_ALIGN;
+	tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
+	    tg3_flag(tp, PCIX_MODE)) {
+		tp->rx_offset = 0;
+#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+		tp->rx_copy_thresh = ~(u16)0;
+#endif
+	}
+
+	tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
+	tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
+	tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
+
+	tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
+
+	/* Increment the rx prod index on the rx std ring by at most
+	 * 8 for these chips to work around hw errata.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
+		tp->rx_std_max_post = 8;
+
+	if (tg3_flag(tp, ASPM_WORKAROUND))
+		tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
+				     PCIE_PWR_MGMT_L1_THRESH_MSK;
+
+	return err;
+}
+
+#ifdef CONFIG_SPARC
+static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
+{
+	struct net_device *dev = tp->dev;
+	struct pci_dev *pdev = tp->pdev;
+	struct device_node *dp = pci_device_to_OF_node(pdev);
+	const unsigned char *addr;
+	int len;
+
+	addr = of_get_property(dp, "local-mac-address", &len);
+	if (addr && len == 6) {
+		memcpy(dev->dev_addr, addr, 6);
+		memcpy(dev->perm_addr, dev->dev_addr, 6);
+		return 0;
+	}
+	return -ENODEV;
+}
+
+static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
+{
+	struct net_device *dev = tp->dev;
+
+	memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
+	memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
+	return 0;
+}
+#endif
+
+static int __devinit tg3_get_device_address(struct tg3 *tp)
+{
+	struct net_device *dev = tp->dev;
+	u32 hi, lo, mac_offset;
+	int addr_ok = 0;
+
+#ifdef CONFIG_SPARC
+	if (!tg3_get_macaddr_sparc(tp))
+		return 0;
+#endif
+
+	mac_offset = 0x7c;
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
+	    tg3_flag(tp, 5780_CLASS)) {
+		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
+			mac_offset = 0xcc;
+		if (tg3_nvram_lock(tp))
+			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
+		else
+			tg3_nvram_unlock(tp);
+	} else if (tg3_flag(tp, 5717_PLUS)) {
+		if (tp->pci_fn & 1)
+			mac_offset = 0xcc;
+		if (tp->pci_fn > 1)
+			mac_offset += 0x18c;
+	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
+		mac_offset = 0x10;
+
+	/* First try to get it from MAC address mailbox. */
+	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
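+	/* 0x484b is ASCII "HK", apparently the bootcode's signature
+	 * marking a valid MAC address in the mailbox.
+	 */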
+	if ((hi >> 16) == 0x484b) {
+		dev->dev_addr[0] = (hi >>  8) & 0xff;
+		dev->dev_addr[1] = (hi >>  0) & 0xff;
+
+		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
+		dev->dev_addr[2] = (lo >> 24) & 0xff;
+		dev->dev_addr[3] = (lo >> 16) & 0xff;
+		dev->dev_addr[4] = (lo >>  8) & 0xff;
+		dev->dev_addr[5] = (lo >>  0) & 0xff;
+
+		/* Some old bootcode may report a 0 MAC address in SRAM */
+		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+	}
+	if (!addr_ok) {
+		/* Next, try NVRAM. */
+		if (!tg3_flag(tp, NO_NVRAM) &&
+		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
+		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
+			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
+			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+		}
+		/* Finally just fetch it out of the MAC control regs. */
+		else {
+			hi = tr32(MAC_ADDR_0_HIGH);
+			lo = tr32(MAC_ADDR_0_LOW);
+
+			dev->dev_addr[5] = lo & 0xff;
+			dev->dev_addr[4] = (lo >> 8) & 0xff;
+			dev->dev_addr[3] = (lo >> 16) & 0xff;
+			dev->dev_addr[2] = (lo >> 24) & 0xff;
+			dev->dev_addr[1] = hi & 0xff;
+			dev->dev_addr[0] = (hi >> 8) & 0xff;
+		}
+	}
+
+	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+#ifdef CONFIG_SPARC
+		if (!tg3_get_default_macaddr_sparc(tp))
+			return 0;
+#endif
+		return -EINVAL;
+	}
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+	return 0;
+}
+
+#define BOUNDARY_SINGLE_CACHELINE	1
+#define BOUNDARY_MULTI_CACHELINE	2
+
+static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
+{
+	int cacheline_size;
+	u8 byte;
+	int goal;
+
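+	/* PCI_CACHE_LINE_SIZE is in units of 32-bit words, hence the
+	 * multiply by four; zero is treated as the 1024-byte worst case.
+	 */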
+	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
+	if (byte == 0)
+		cacheline_size = 1024;
+	else
+		cacheline_size = (int) byte * 4;
+
+	/* On 5703 and later chips, the boundary bits have no
+	 * effect.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
+	    !tg3_flag(tp, PCI_EXPRESS))
+		goto out;
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
+	goal = BOUNDARY_MULTI_CACHELINE;
+#elif defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
+	goal = BOUNDARY_SINGLE_CACHELINE;
+#else
+	goal = 0;
+#endif
+
+	if (tg3_flag(tp, 57765_PLUS)) {
+		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+		goto out;
+	}
+
+	if (!goal)
+		goto out;
+
+	/* PCI controllers on most RISC systems tend to disconnect
+	 * when a device tries to burst across a cache-line boundary.
+	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
+	 *
+	 * Unfortunately, for PCI-E there are only limited
+	 * write-side controls for this, and thus for reads
+	 * we will still get the disconnects.  We'll also waste
+	 * these PCI cycles for both read and write for chips
+	 * other than 5700 and 5701 which do not implement the
+	 * boundary bits.
+	 */
+	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
+			} else {
+				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			}
+			break;
+
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
+			break;
+
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
+				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
+			break;
+		}
+	} else if (tg3_flag(tp, PCI_EXPRESS)) {
+		switch (cacheline_size) {
+		case 16:
+		case 32:
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
+				break;
+			}
+			/* fallthrough */
+		case 128:
+		default:
+			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
+			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
+			break;
+		}
+	} else {
+		switch (cacheline_size) {
+		case 16:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_16 |
+					DMA_RWCTRL_WRITE_BNDRY_16);
+				break;
+			}
+			/* fallthrough */
+		case 32:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_32 |
+					DMA_RWCTRL_WRITE_BNDRY_32);
+				break;
+			}
+			/* fallthrough */
+		case 64:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_64 |
+					DMA_RWCTRL_WRITE_BNDRY_64);
+				break;
+			}
+			/* fallthrough */
+		case 128:
+			if (goal == BOUNDARY_SINGLE_CACHELINE) {
+				val |= (DMA_RWCTRL_READ_BNDRY_128 |
+					DMA_RWCTRL_WRITE_BNDRY_128);
+				break;
+			}
+			/* fallthrough */
+		case 256:
+			val |= (DMA_RWCTRL_READ_BNDRY_256 |
+				DMA_RWCTRL_WRITE_BNDRY_256);
+			break;
+		case 512:
+			val |= (DMA_RWCTRL_READ_BNDRY_512 |
+				DMA_RWCTRL_WRITE_BNDRY_512);
+			break;
+		case 1024:
+		default:
+			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
+				DMA_RWCTRL_WRITE_BNDRY_1024);
+			break;
+		}
+	}
+
+out:
+	return val;
+}
+
+static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf,
+				     dma_addr_t buf_dma, int size,
+				     int to_device)
+{
+	struct tg3_internal_buffer_desc test_desc;
+	u32 sram_dma_descs;
+	int i, ret;
+
+	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
+
+	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
+	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
+	tw32(RDMAC_STATUS, 0);
+	tw32(WDMAC_STATUS, 0);
+
+	tw32(BUFMGR_MODE, 0);
+	tw32(FTQ_RESET, 0);
+
+	test_desc.addr_hi = ((u64) buf_dma) >> 32;
+	test_desc.addr_lo = buf_dma & 0xffffffff;
+	test_desc.nic_mbuf = 0x00002100;
+	test_desc.len = size;
+
+	/*
+	 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
+	 * the *second* time the tg3 driver was loaded after an
+	 * initial scan.
+	 *
+	 * Broadcom tells me:
+	 *   ...the DMA engine is connected to the GRC block and a DMA
+	 *   reset may affect the GRC block in some unpredictable way...
+	 *   The behavior of resets to individual blocks has not been tested.
+	 *
+	 * Broadcom noted the GRC reset will also reset all sub-components.
+	 */
+	if (to_device) {
+		test_desc.cqid_sqid = (13 << 8) | 2;
+
+		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
+		udelay(40);
+	} else {
+		test_desc.cqid_sqid = (16 << 8) | 7;
+
+		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
+		udelay(40);
+	}
+	test_desc.flags = 0x00000005;
+
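+	/* Copy the descriptor into NIC SRAM one 32-bit word at a time
+	 * through the PCI memory window registers.
+	 */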
+	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
+		u32 val;
+
+		val = *(((u32 *)&test_desc) + i);
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
+				       sram_dma_descs + (i * sizeof(u32)));
+		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
+	}
+	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
+
+	if (to_device)
+		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
+	else
+		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
+
+	ret = -ENODEV;
+	for (i = 0; i < 40; i++) {
+		u32 val;
+
+		if (to_device)
+			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
+		else
+			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
+		if ((val & 0xffff) == sram_dma_descs) {
+			ret = 0;
+			break;
+		}
+
+		udelay(100);
+	}
+
+	return ret;
+}
+
+#define TEST_BUFFER_SIZE	0x2000
+
+static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
+	{ },
+};
+
+static int __devinit tg3_test_dma(struct tg3 *tp)
+{
+	dma_addr_t buf_dma;
+	u32 *buf, saved_dma_rwctrl;
+	int ret = 0;
+
+	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+				 &buf_dma, GFP_KERNEL);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto out_nofree;
+	}
+
+	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
+			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
+
+	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
+
+	if (tg3_flag(tp, 57765_PLUS))
+		goto out;
+
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		/* DMA read watermark not used on PCIE */
+		tp->dma_rwctrl |= 0x00180000;
+	} else if (!tg3_flag(tp, PCIX_MODE)) {
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
+			tp->dma_rwctrl |= 0x003f0000;
+		else
+			tp->dma_rwctrl |= 0x003f000f;
+	} else {
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
+			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
+			u32 read_water = 0x7;
+
+			/* If the 5704 is behind the EPB bridge, we can
+			 * do the less restrictive ONE_DMA workaround for
+			 * better performance.
+			 */
+			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
+			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+				tp->dma_rwctrl |= 0x8000;
+			else if (ccval == 0x6 || ccval == 0x7)
+				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
+
+			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
+				read_water = 4;
+			/* Set bit 23 to enable PCIX hw bug fix */
+			tp->dma_rwctrl |=
+				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
+				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
+				(1 << 23);
+		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
+			/* 5780 always in PCIX mode */
+			tp->dma_rwctrl |= 0x00144000;
+		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
+			/* 5714 always in PCIX mode */
+			tp->dma_rwctrl |= 0x00148000;
+		} else {
+			tp->dma_rwctrl |= 0x001b000f;
+		}
+	}
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
+		tp->dma_rwctrl &= 0xfffffff0;
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
+	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
+		/* Remove this if it causes problems for some boards. */
+		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
+
+		/* On 5700/5701 chips, we need to set this bit.
+		 * Otherwise the chip will issue cacheline transactions
+		 * to streamable DMA memory without all of the byte
+		 * enables turned on.  This is an error on several
+		 * RISC PCI controllers, in particular sparc64.
+		 *
+		 * On 5703/5704 chips, this bit has been reassigned
+		 * a different meaning.  In particular, it is used
+		 * on those chips to enable a PCI-X workaround.
+		 */
+		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
+	}
+
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+#if 0
+	/* Unneeded, already done by tg3_get_invariants.  */
+	tg3_switch_clocks(tp);
+#endif
+
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
+	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
+		goto out;
+
+	/* It is best to perform DMA test with maximum write burst size
+	 * to expose the 5700/5701 write DMA bug.
+	 */
+	saved_dma_rwctrl = tp->dma_rwctrl;
+	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+
+	while (1) {
+		u32 *p = buf, i;
+
+		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
+			p[i] = i;
+
+		/* Send the buffer to the chip. */
+		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
+		if (ret) {
+			dev_err(&tp->pdev->dev,
+				"%s: Buffer write failed. err = %d\n",
+				__func__, ret);
+			break;
+		}
+
+#if 0
+		/* validate data reached card RAM correctly. */
+		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+			u32 val;
+			tg3_read_mem(tp, 0x2100 + (i*4), &val);
+			if (le32_to_cpu(val) != p[i]) {
+				dev_err(&tp->pdev->dev,
+					"%s: Buffer corrupted on device! "
+					"(%d != %d)\n", __func__, val, i);
+				/* ret = -ENODEV here? */
+			}
+			p[i] = 0;
+		}
+#endif
+		/* Now read it back. */
+		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
+		if (ret) {
+			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
+				"err = %d\n", __func__, ret);
+			break;
+		}
+
+		/* Verify it. */
+		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
+			if (p[i] == i)
+				continue;
+
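+			/* First mismatch: retry once with the most
+			 * conservative 16-byte write boundary; a mismatch
+			 * with that setting already in place is fatal.
+			 */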
+			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+			    DMA_RWCTRL_WRITE_BNDRY_16) {
+				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+				break;
+			} else {
+				dev_err(&tp->pdev->dev,
+					"%s: Buffer corrupted on read back! "
+					"(%d != %d)\n", __func__, p[i], i);
+				ret = -ENODEV;
+				goto out;
+			}
+		}
+
+		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
+			/* Success. */
+			ret = 0;
+			break;
+		}
+	}
+	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
+	    DMA_RWCTRL_WRITE_BNDRY_16) {
+		/* DMA test passed without adjusting the DMA boundary;
+		 * now look for chipsets that are known to expose the
+		 * DMA bug without failing the test.
+		 */
+		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
+			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
+			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
+		} else {
+			/* Safe to use the calculated DMA boundary. */
+			tp->dma_rwctrl = saved_dma_rwctrl;
+		}
+
+		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
+	}
+
+out:
+	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
+out_nofree:
+	return ret;
+}
+
+static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
+{
+	if (tg3_flag(tp, 57765_PLUS)) {
+		tp->bufmgr_config.mbuf_read_dma_low_water =
+			DEFAULT_MB_RDMA_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_mac_rx_low_water =
+			DEFAULT_MB_MACRX_LOW_WATER_57765;
+		tp->bufmgr_config.mbuf_high_water =
+			DEFAULT_MB_HIGH_WATER_57765;
+
+		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+			DEFAULT_MB_RDMA_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+		tp->bufmgr_config.mbuf_high_water_jumbo =
+			DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+	} else if (tg3_flag(tp, 5705_PLUS)) {
+		tp->bufmgr_config.mbuf_read_dma_low_water =
+			DEFAULT_MB_RDMA_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_mac_rx_low_water =
+			DEFAULT_MB_MACRX_LOW_WATER_5705;
+		tp->bufmgr_config.mbuf_high_water =
+			DEFAULT_MB_HIGH_WATER_5705;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
+			tp->bufmgr_config.mbuf_mac_rx_low_water =
+				DEFAULT_MB_MACRX_LOW_WATER_5906;
+			tp->bufmgr_config.mbuf_high_water =
+				DEFAULT_MB_HIGH_WATER_5906;
+		}
+
+		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+			DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
+		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+			DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
+		tp->bufmgr_config.mbuf_high_water_jumbo =
+			DEFAULT_MB_HIGH_WATER_JUMBO_5780;
+	} else {
+		tp->bufmgr_config.mbuf_read_dma_low_water =
+			DEFAULT_MB_RDMA_LOW_WATER;
+		tp->bufmgr_config.mbuf_mac_rx_low_water =
+			DEFAULT_MB_MACRX_LOW_WATER;
+		tp->bufmgr_config.mbuf_high_water =
+			DEFAULT_MB_HIGH_WATER;
+
+		tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+			DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
+		tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+			DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
+		tp->bufmgr_config.mbuf_high_water_jumbo =
+			DEFAULT_MB_HIGH_WATER_JUMBO;
+	}
+
+	tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
+	tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
+}
+
+static char * __devinit tg3_phy_string(struct tg3 *tp)
+{
+	switch (tp->phy_id & TG3_PHY_ID_MASK) {
+	case TG3_PHY_ID_BCM5400:	return "5400";
+	case TG3_PHY_ID_BCM5401:	return "5401";
+	case TG3_PHY_ID_BCM5411:	return "5411";
+	case TG3_PHY_ID_BCM5701:	return "5701";
+	case TG3_PHY_ID_BCM5703:	return "5703";
+	case TG3_PHY_ID_BCM5704:	return "5704";
+	case TG3_PHY_ID_BCM5705:	return "5705";
+	case TG3_PHY_ID_BCM5750:	return "5750";
+	case TG3_PHY_ID_BCM5752:	return "5752";
+	case TG3_PHY_ID_BCM5714:	return "5714";
+	case TG3_PHY_ID_BCM5780:	return "5780";
+	case TG3_PHY_ID_BCM5755:	return "5755";
+	case TG3_PHY_ID_BCM5787:	return "5787";
+	case TG3_PHY_ID_BCM5784:	return "5784";
+	case TG3_PHY_ID_BCM5756:	return "5722/5756";
+	case TG3_PHY_ID_BCM5906:	return "5906";
+	case TG3_PHY_ID_BCM5761:	return "5761";
+	case TG3_PHY_ID_BCM5718C:	return "5718C";
+	case TG3_PHY_ID_BCM5718S:	return "5718S";
+	case TG3_PHY_ID_BCM57765:	return "57765";
+	case TG3_PHY_ID_BCM5719C:	return "5719C";
+	case TG3_PHY_ID_BCM5720C:	return "5720C";
+	case TG3_PHY_ID_BCM8002:	return "8002/serdes";
+	case 0:			return "serdes";
+	default:		return "unknown";
+	}
+}
+
+static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
+{
+	if (tg3_flag(tp, PCI_EXPRESS)) {
+		strcpy(str, "PCI Express");
+		return str;
+	} else if (tg3_flag(tp, PCIX_MODE)) {
+		u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
+
+		strcpy(str, "PCIX:");
+
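+		/* The low CLOCK_CTRL bits indicate the PCI-X bus speed. */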
+		if ((clock_ctrl == 7) ||
+		    ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
+		     GRC_MISC_CFG_BOARD_ID_5704CIOBE))
+			strcat(str, "133MHz");
+		else if (clock_ctrl == 0)
+			strcat(str, "33MHz");
+		else if (clock_ctrl == 2)
+			strcat(str, "50MHz");
+		else if (clock_ctrl == 4)
+			strcat(str, "66MHz");
+		else if (clock_ctrl == 6)
+			strcat(str, "100MHz");
+	} else {
+		strcpy(str, "PCI:");
+		if (tg3_flag(tp, PCI_HIGH_SPEED))
+			strcat(str, "66MHz");
+		else
+			strcat(str, "33MHz");
+	}
+	if (tg3_flag(tp, PCI_32BIT))
+		strcat(str, ":32-bit");
+	else
+		strcat(str, ":64-bit");
+	return str;
+}
+
+static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
+{
+	struct pci_dev *peer;
+	unsigned int func, devnr = tp->pdev->devfn & ~7;
+
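+	/* Scan the other functions in this slot for the mate device. */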
+	for (func = 0; func < 8; func++) {
+		peer = pci_get_slot(tp->pdev->bus, devnr | func);
+		if (peer && peer != tp->pdev)
+			break;
+		pci_dev_put(peer);
+	}
+	/* 5704 can be configured in single-port mode, set peer to
+	 * tp->pdev in that case.
+	 */
+	if (!peer) {
+		peer = tp->pdev;
+		return peer;
+	}
+
+	/*
+	 * We don't need to keep the refcount elevated; there's no way
+	 * to remove one half of this device without removing the other.
+	 */
+	pci_dev_put(peer);
+
+	return peer;
+}
+
+static void __devinit tg3_init_coal(struct tg3 *tp)
+{
+	struct ethtool_coalesce *ec = &tp->coal;
+
+	memset(ec, 0, sizeof(*ec));
+	ec->cmd = ETHTOOL_GCOALESCE;
+	ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
+	ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
+	ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
+	ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
+	ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
+	ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
+	ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
+	ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
+	ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
+
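+	/* The clear-ticks modes use their own coalescing defaults. */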
+	if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
+				 HOSTCC_MODE_CLRTICK_TXBD)) {
+		ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
+		ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
+		ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
+		ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
+	}
+
+	if (tg3_flag(tp, 5705_PLUS)) {
+		ec->rx_coalesce_usecs_irq = 0;
+		ec->tx_coalesce_usecs_irq = 0;
+		ec->stats_block_coalesce_usecs = 0;
+	}
+}
+
+static const struct net_device_ops tg3_netdev_ops = {
+	.ndo_open		= tg3_open,
+	.ndo_stop		= tg3_close,
+	.ndo_start_xmit		= tg3_start_xmit,
+	.ndo_get_stats64	= tg3_get_stats64,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_set_multicast_list	= tg3_set_rx_mode,
+	.ndo_set_mac_address	= tg3_set_mac_addr,
+	.ndo_do_ioctl		= tg3_ioctl,
+	.ndo_tx_timeout		= tg3_tx_timeout,
+	.ndo_change_mtu		= tg3_change_mtu,
+	.ndo_fix_features	= tg3_fix_features,
+	.ndo_set_features	= tg3_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= tg3_poll_controller,
+#endif
+};
+
+static int __devinit tg3_init_one(struct pci_dev *pdev,
+				  const struct pci_device_id *ent)
+{
+	struct net_device *dev;
+	struct tg3 *tp;
+	int i, err, pm_cap;
+	u32 sndmbx, rcvmbx, intmbx;
+	char str[40];
+	u64 dma_mask, persist_dma_mask;
+	u32 features = 0;
+
+	printk_once(KERN_INFO "%s\n", version);
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
+		return err;
+	}
+
+	err = pci_request_regions(pdev, DRV_MODULE_NAME);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
+		goto err_out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	/* Find power-management capability. */
+	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm_cap == 0) {
+		dev_err(&pdev->dev,
+			"Cannot find Power Management capability, aborting\n");
+		err = -EIO;
+		goto err_out_free_res;
+	}
+
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err) {
+		dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
+		goto err_out_free_res;
+	}
+
+	dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
+	if (!dev) {
+		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
+		err = -ENOMEM;
+		goto err_out_power_down;
+	}
+
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	tp = netdev_priv(dev);
+	tp->pdev = pdev;
+	tp->dev = dev;
+	tp->pm_cap = pm_cap;
+	tp->rx_mode = TG3_DEF_RX_MODE;
+	tp->tx_mode = TG3_DEF_TX_MODE;
+
+	if (tg3_debug > 0)
+		tp->msg_enable = tg3_debug;
+	else
+		tp->msg_enable = TG3_DEF_MSG_ENABLE;
+
+	/* The word/byte swap controls here control register access byte
+	 * swapping.  DMA data byte swapping is controlled in the GRC_MODE
+	 * setting below.
+	 */
+	tp->misc_host_ctrl =
+		MISC_HOST_CTRL_MASK_PCI_INT |
+		MISC_HOST_CTRL_WORD_SWAP |
+		MISC_HOST_CTRL_INDIR_ACCESS |
+		MISC_HOST_CTRL_PCISTATE_RW;
+
+	/* The NONFRM (non-frame) byte/word swap controls take effect
+	 * on descriptor entries, anything which isn't packet data.
+	 *
+	 * The StrongARM chips on the board (one for tx, one for rx)
+	 * are running in big-endian mode.
+	 */
+	tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
+			GRC_MODE_WSWAP_NONFRM_DATA);
+#ifdef __BIG_ENDIAN
+	tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
+#endif
+	spin_lock_init(&tp->lock);
+	spin_lock_init(&tp->indirect_lock);
+	INIT_WORK(&tp->reset_task, tg3_reset_task);
+
+	tp->regs = pci_ioremap_bar(pdev, BAR_0);
+	if (!tp->regs) {
+		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
+		err = -ENOMEM;
+		goto err_out_free_dev;
+	}
+
+	if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
+	    tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
+	    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
+		tg3_flag_set(tp, ENABLE_APE);
+		tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
+		if (!tp->aperegs) {
+			dev_err(&pdev->dev,
+				"Cannot map APE registers, aborting\n");
+			err = -ENOMEM;
+			goto err_out_iounmap;
+		}
+	}
+
+	tp->rx_pending = TG3_DEF_RX_RING_PENDING;
+	tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
+
+	dev->ethtool_ops = &tg3_ethtool_ops;
+	dev->watchdog_timeo = TG3_TX_TIMEOUT;
+	dev->netdev_ops = &tg3_netdev_ops;
+	dev->irq = pdev->irq;
+
+	err = tg3_get_invariants(tp);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Problem fetching invariants of chip, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	/* The EPB bridge inside 5714, 5715, and 5780 and any
+	 * device behind the EPB cannot support DMA addresses > 40-bit.
+	 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
+	 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
+	 * do DMA address check in tg3_start_xmit().
+	 */
+	if (tg3_flag(tp, IS_5788))
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
+	else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
+#ifdef CONFIG_HIGHMEM
+		dma_mask = DMA_BIT_MASK(64);
+#endif
+	} else
+		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
+
+	/* Configure DMA attributes. */
+	if (dma_mask > DMA_BIT_MASK(32)) {
+		err = pci_set_dma_mask(pdev, dma_mask);
+		if (!err) {
+			features |= NETIF_F_HIGHDMA;
+			err = pci_set_consistent_dma_mask(pdev,
+							  persist_dma_mask);
+			if (err < 0) {
+				dev_err(&pdev->dev, "Unable to obtain 64 bit "
+					"DMA for consistent allocations\n");
+				goto err_out_apeunmap;
+			}
+		}
+	}
+	if (err || dma_mask == DMA_BIT_MASK(32)) {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_out_apeunmap;
+		}
+	}
+
+	tg3_init_bufmgr_config(tp);
+
+	features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+
+	/* 5700 B0 chips do not support checksumming correctly due
+	 * to hardware bugs.
+	 */
+	if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
+		features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+
+		if (tg3_flag(tp, 5755_PLUS))
+			features |= NETIF_F_IPV6_CSUM;
+	}
+
+	/* TSO is on by default on chips that support hardware TSO.
+	 * Firmware TSO on older chips gives lower performance, so it
+	 * is off by default, but can be enabled using ethtool.
+	 */
+	if ((tg3_flag(tp, HW_TSO_1) ||
+	     tg3_flag(tp, HW_TSO_2) ||
+	     tg3_flag(tp, HW_TSO_3)) &&
+	    (features & NETIF_F_IP_CSUM))
+		features |= NETIF_F_TSO;
+	if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
+		if (features & NETIF_F_IPV6_CSUM)
+			features |= NETIF_F_TSO6;
+		if (tg3_flag(tp, HW_TSO_3) ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
+		    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+		     GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
+		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+			features |= NETIF_F_TSO_ECN;
+	}
+
+	dev->features |= features;
+	dev->vlan_features |= features;
+
+	/*
+	 * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
+	 * loopback for the remaining devices.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+	    !tg3_flag(tp, CPMU_PRESENT))
+		/* Add the loopback capability */
+		features |= NETIF_F_LOOPBACK;
+
+	dev->hw_features |= features;
+
+	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
+	    !tg3_flag(tp, TSO_CAPABLE) &&
+	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
+		tg3_flag_set(tp, MAX_RXPEND_64);
+		tp->rx_pending = 63;
+	}
+
+	err = tg3_get_device_address(tp);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Could not obtain valid ethernet address, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	/*
+	 * Reset chip in case UNDI or EFI driver did not shut it down.
+	 * Otherwise the DMA self test will enable WDMAC and we'll see
+	 * (spurious) pending DMA on the PCI bus at that point.
+	 */
+	if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+	    (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+		tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+	}
+
+	err = tg3_test_dma(tp);
+	if (err) {
+		dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+	rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+	sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+	for (i = 0; i < tp->irq_max; i++) {
+		struct tg3_napi *tnapi = &tp->napi[i];
+
+		tnapi->tp = tp;
+		tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+		tnapi->int_mbox = intmbx;
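+		/*
+		 * The first four interrupt mailboxes are 64-bit registers
+		 * (8-byte stride); the remaining vectors are packed at
+		 * 4-byte strides.
+		 */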
+		if (i < 4)
+			intmbx += 0x8;
+		else
+			intmbx += 0x4;
+
+		tnapi->consmbox = rcvmbx;
+		tnapi->prodmbox = sndmbx;
+
+		if (i)
+			tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+		else
+			tnapi->coal_now = HOSTCC_MODE_NOW;
+
+		if (!tg3_flag(tp, SUPPORT_MSIX))
+			break;
+
+		/*
+		 * If we support MSIX, we'll be using RSS.  If we're using
+		 * RSS, the first vector only handles link interrupts and the
+		 * remaining vectors handle rx and tx interrupts.  Reuse the
+		 * mailbox values for the next iteration.  The values we set
+		 * up above are still useful for the single-vectored mode.
+		 */
+		if (!i)
+			continue;
+
+		rcvmbx += 0x8;
+
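+		/*
+		 * The send producer mailboxes alternate between the low
+		 * and high halves of successive 64-bit mailbox registers.
+		 */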
+		if (sndmbx & 0x4)
+			sndmbx -= 0x4;
+		else
+			sndmbx += 0xc;
+	}
+
+	tg3_init_coal(tp);
+
+	pci_set_drvdata(pdev, dev);
+
+	if (tg3_flag(tp, 5717_PLUS)) {
+		/* Resume a low-power mode */
+		tg3_frob_aux_power(tp, false);
+	}
+
+	err = register_netdev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
+		goto err_out_apeunmap;
+	}
+
+	netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
+		    tp->board_part_number,
+		    tp->pci_chip_rev_id,
+		    tg3_bus_string(tp, str),
+		    dev->dev_addr);
+
+	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
+		struct phy_device *phydev;
+		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+		netdev_info(dev,
+			    "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+			    phydev->drv->name, dev_name(&phydev->dev));
+	} else {
+		char *ethtype;
+
+		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
+			ethtype = "10/100Base-TX";
+		else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
+			ethtype = "1000Base-SX";
+		else
+			ethtype = "10/100/1000Base-T";
+
+		netdev_info(dev, "attached PHY is %s (%s Ethernet) "
+			    "(WireSpeed[%d], EEE[%d])\n",
+			    tg3_phy_string(tp), ethtype,
+			    (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
+			    (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
+	}
+
+	netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
+		    (dev->features & NETIF_F_RXCSUM) != 0,
+		    tg3_flag(tp, USE_LINKCHG_REG) != 0,
+		    (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
+		    tg3_flag(tp, ENABLE_ASF) != 0,
+		    tg3_flag(tp, TSO_CAPABLE) != 0);
+	netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+		    tp->dma_rwctrl,
+		    pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
+		    ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
+
+	pci_save_state(pdev);
+
+	return 0;
+
+err_out_apeunmap:
+	if (tp->aperegs) {
+		iounmap(tp->aperegs);
+		tp->aperegs = NULL;
+	}
+
+err_out_iounmap:
+	if (tp->regs) {
+		iounmap(tp->regs);
+		tp->regs = NULL;
+	}
+
+err_out_free_dev:
+	free_netdev(dev);
+
+err_out_power_down:
+	pci_set_power_state(pdev, PCI_D3hot);
+
+err_out_free_res:
+	pci_release_regions(pdev);
+
+err_out_disable_pdev:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void __devexit tg3_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		struct tg3 *tp = netdev_priv(dev);
+
+		if (tp->fw)
+			release_firmware(tp->fw);
+
+		cancel_work_sync(&tp->reset_task);
+
+		if (!tg3_flag(tp, USE_PHYLIB)) {
+			tg3_phy_fini(tp);
+			tg3_mdio_fini(tp);
+		}
+
+		unregister_netdev(dev);
+		if (tp->aperegs) {
+			iounmap(tp->aperegs);
+			tp->aperegs = NULL;
+		}
+		if (tp->regs) {
+			iounmap(tp->regs);
+			tp->regs = NULL;
+		}
+		free_netdev(dev);
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+	}
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tg3_suspend(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+
+	if (!netif_running(dev))
+		return 0;
+
+	flush_work_sync(&tp->reset_task);
+	tg3_phy_stop(tp);
+	tg3_netif_stop(tp);
+
+	del_timer_sync(&tp->timer);
+
+	tg3_full_lock(tp, 1);
+	tg3_disable_ints(tp);
+	tg3_full_unlock(tp);
+
+	netif_device_detach(dev);
+
+	tg3_full_lock(tp, 0);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+	tg3_flag_clear(tp, INIT_COMPLETE);
+	tg3_full_unlock(tp);
+
+	err = tg3_power_down_prepare(tp);
+	if (err) {
+		int err2;
+
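+		/*
+		 * Power-down preparation failed; bring the hardware back
+		 * up so the device remains usable.
+		 */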
+		tg3_full_lock(tp, 0);
+
+		tg3_flag_set(tp, INIT_COMPLETE);
+		err2 = tg3_restart_hw(tp, 1);
+		if (err2)
+			goto out;
+
+		tp->timer.expires = jiffies + tp->timer_offset;
+		add_timer(&tp->timer);
+
+		netif_device_attach(dev);
+		tg3_netif_start(tp);
+
+out:
+		tg3_full_unlock(tp);
+
+		if (!err2)
+			tg3_phy_start(tp);
+	}
+
+	return err;
+}
+
+static int tg3_resume(struct device *device)
+{
+	struct pci_dev *pdev = to_pci_dev(device);
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(dev);
+	int err;
+
+	if (!netif_running(dev))
+		return 0;
+
+	netif_device_attach(dev);
+
+	tg3_full_lock(tp, 0);
+
+	tg3_flag_set(tp, INIT_COMPLETE);
+	err = tg3_restart_hw(tp, 1);
+	if (err)
+		goto out;
+
+	tp->timer.expires = jiffies + tp->timer_offset;
+	add_timer(&tp->timer);
+
+	tg3_netif_start(tp);
+
+out:
+	tg3_full_unlock(tp);
+
+	if (!err)
+		tg3_phy_start(tp);
+
+	return err;
+}
+
+static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
+#define TG3_PM_OPS (&tg3_pm_ops)
+
+#else
+
+#define TG3_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
+/**
+ * tg3_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(netdev);
+	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
+
+	netdev_info(netdev, "PCI I/O error detected\n");
+
+	rtnl_lock();
+
+	if (!netif_running(netdev))
+		goto done;
+
+	tg3_phy_stop(tp);
+
+	tg3_netif_stop(tp);
+
+	del_timer_sync(&tp->timer);
+	tg3_flag_clear(tp, RESTART_TIMER);
+
+	/* Want to make sure that the reset task doesn't run */
+	cancel_work_sync(&tp->reset_task);
+	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
+	tg3_flag_clear(tp, RESTART_TIMER);
+
+	netif_device_detach(netdev);
+
+	/* Clean up software state, even if MMIO is blocked */
+	tg3_full_lock(tp, 0);
+	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
+	tg3_full_unlock(tp);
+
+done:
+	if (state == pci_channel_io_perm_failure)
+		err = PCI_ERS_RESULT_DISCONNECT;
+	else
+		pci_disable_device(pdev);
+
+	rtnl_unlock();
+
+	return err;
+}
+
+/**
+ * tg3_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot.
+ * At this point, the card has experienced a hard reset,
+ * followed by fixups by BIOS, and has its config space
+ * set up identically to what it was at cold boot.
+ */
+static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(netdev);
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+	int err;
+
+	rtnl_lock();
+
+	if (pci_enable_device(pdev)) {
+		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
+		goto done;
+	}
+
+	pci_set_master(pdev);
+	pci_restore_state(pdev);
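+	/* Re-save state so it can be restored again after a later reset. */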
+	pci_save_state(pdev);
+
+	if (!netif_running(netdev)) {
+		rc = PCI_ERS_RESULT_RECOVERED;
+		goto done;
+	}
+
+	err = tg3_power_up(tp);
+	if (err)
+		goto done;
+
+	rc = PCI_ERS_RESULT_RECOVERED;
+
+done:
+	rtnl_unlock();
+
+	return rc;
+}
+
+/**
+ * tg3_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells
+ * us that it's OK to resume normal operation.
+ */
+static void tg3_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct tg3 *tp = netdev_priv(netdev);
+	int err;
+
+	rtnl_lock();
+
+	if (!netif_running(netdev))
+		goto done;
+
+	tg3_full_lock(tp, 0);
+	tg3_flag_set(tp, INIT_COMPLETE);
+	err = tg3_restart_hw(tp, 1);
+	tg3_full_unlock(tp);
+	if (err) {
+		netdev_err(netdev, "Cannot restart hardware after reset.\n");
+		goto done;
+	}
+
+	netif_device_attach(netdev);
+
+	tp->timer.expires = jiffies + tp->timer_offset;
+	add_timer(&tp->timer);
+
+	tg3_netif_start(tp);
+
+	tg3_phy_start(tp);
+
+done:
+	rtnl_unlock();
+}
+
+static struct pci_error_handlers tg3_err_handler = {
+	.error_detected	= tg3_io_error_detected,
+	.slot_reset	= tg3_io_slot_reset,
+	.resume		= tg3_io_resume
+};
+
+static struct pci_driver tg3_driver = {
+	.name		= DRV_MODULE_NAME,
+	.id_table	= tg3_pci_tbl,
+	.probe		= tg3_init_one,
+	.remove		= __devexit_p(tg3_remove_one),
+	.err_handler	= &tg3_err_handler,
+	.driver.pm	= TG3_PM_OPS,
+};
+
+static int __init tg3_init(void)
+{
+	return pci_register_driver(&tg3_driver);
+}
+
+static void __exit tg3_cleanup(void)
+{
+	pci_unregister_driver(&tg3_driver);
+}
+
+module_init(tg3_init);
+module_exit(tg3_cleanup);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
new file mode 100644
index 0000000..2ea456d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -0,0 +1,3183 @@
+/* $Id: tg3.h,v 1.37.2.32 2002/03/11 12:18:18 davem Exp $
+ * tg3.h: Definitions for Broadcom Tigon3 ethernet driver.
+ *
+ * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
+ * Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
+ * Copyright (C) 2004 Sun Microsystems Inc.
+ * Copyright (C) 2007-2011 Broadcom Corporation.
+ */
+
+#ifndef _T3_H
+#define _T3_H
+
+#define TG3_64BIT_REG_HIGH		0x00UL
+#define TG3_64BIT_REG_LOW		0x04UL
+
+/* Descriptor block info. */
+#define TG3_BDINFO_HOST_ADDR		0x0UL /* 64-bit */
+#define TG3_BDINFO_MAXLEN_FLAGS		0x8UL /* 32-bit */
+#define  BDINFO_FLAGS_USE_EXT_RECV	 0x00000001 /* ext rx_buffer_desc */
+#define  BDINFO_FLAGS_DISABLED		 0x00000002
+#define  BDINFO_FLAGS_MAXLEN_MASK	 0xffff0000
+#define  BDINFO_FLAGS_MAXLEN_SHIFT	 16
+#define TG3_BDINFO_NIC_ADDR		0xcUL /* 32-bit */
+#define TG3_BDINFO_SIZE			0x10UL
+
+#define TG3_RX_STD_MAX_SIZE_5700	512
+#define TG3_RX_STD_MAX_SIZE_5717	2048
+#define TG3_RX_JMB_MAX_SIZE_5700	256
+#define TG3_RX_JMB_MAX_SIZE_5717	1024
+#define TG3_RX_RET_MAX_SIZE_5700	1024
+#define TG3_RX_RET_MAX_SIZE_5705	512
+#define TG3_RX_RET_MAX_SIZE_5717	4096
+
+/* First 256 bytes are a mirror of PCI config space. */
+#define TG3PCI_VENDOR			0x00000000
+#define  TG3PCI_VENDOR_BROADCOM		 0x14e4
+#define TG3PCI_DEVICE			0x00000002
+#define  TG3PCI_DEVICE_TIGON3_1		 0x1644 /* BCM5700 */
+#define  TG3PCI_DEVICE_TIGON3_2		 0x1645 /* BCM5701 */
+#define  TG3PCI_DEVICE_TIGON3_3		 0x1646 /* BCM5702 */
+#define  TG3PCI_DEVICE_TIGON3_4		 0x1647 /* BCM5703 */
+#define  TG3PCI_DEVICE_TIGON3_5761S	 0x1688
+#define  TG3PCI_DEVICE_TIGON3_5761SE	 0x1689
+#define  TG3PCI_DEVICE_TIGON3_57780	 0x1692
+#define  TG3PCI_DEVICE_TIGON3_57760	 0x1690
+#define  TG3PCI_DEVICE_TIGON3_57790	 0x1694
+#define  TG3PCI_DEVICE_TIGON3_57788	 0x1691
+#define  TG3PCI_DEVICE_TIGON3_5785_G	 0x1699 /* GPHY */
+#define  TG3PCI_DEVICE_TIGON3_5785_F	 0x16a0 /* 10/100 only */
+#define  TG3PCI_DEVICE_TIGON3_5717	 0x1655
+#define  TG3PCI_DEVICE_TIGON3_5718	 0x1656
+#define  TG3PCI_DEVICE_TIGON3_57781	 0x16b1
+#define  TG3PCI_DEVICE_TIGON3_57785	 0x16b5
+#define  TG3PCI_DEVICE_TIGON3_57761	 0x16b0
+#define  TG3PCI_DEVICE_TIGON3_57765	 0x16b4
+#define  TG3PCI_DEVICE_TIGON3_57791	 0x16b2
+#define  TG3PCI_DEVICE_TIGON3_57795	 0x16b6
+#define  TG3PCI_DEVICE_TIGON3_5719	 0x1657
+#define  TG3PCI_DEVICE_TIGON3_5720	 0x165f
+/* 0x04 --> 0x2c unused */
+#define TG3PCI_SUBVENDOR_ID_BROADCOM		PCI_VENDOR_ID_BROADCOM
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6	0x1644
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5	0x0001
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6	0x0002
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9	0x0003
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1	0x0005
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8	0x0006
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7	0x0007
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10	0x0008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12	0x8008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1	0x0009
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2	0x8009
+#define TG3PCI_SUBVENDOR_ID_3COM		PCI_VENDOR_ID_3COM
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996T		0x1000
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT	0x1006
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX	0x1004
+#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T	0x1007
+#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01	0x1008
+#define TG3PCI_SUBVENDOR_ID_DELL		PCI_VENDOR_ID_DELL
+#define TG3PCI_SUBDEVICE_ID_DELL_VIPER		0x00d1
+#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR		0x0106
+#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT		0x0109
+#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT	0x010a
+#define TG3PCI_SUBVENDOR_ID_COMPAQ		PCI_VENDOR_ID_COMPAQ
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE	0x007c
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2	0x009a
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING	0x007d
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780	0x0085
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2	0x0099
+#define TG3PCI_SUBVENDOR_ID_IBM			PCI_VENDOR_ID_IBM
+#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2	0x0281
+/* 0x30 --> 0x64 unused */
+#define TG3PCI_MSI_DATA			0x00000064
+/* 0x66 --> 0x68 unused */
+#define TG3PCI_MISC_HOST_CTRL		0x00000068
+#define  MISC_HOST_CTRL_CLEAR_INT	 0x00000001
+#define  MISC_HOST_CTRL_MASK_PCI_INT	 0x00000002
+#define  MISC_HOST_CTRL_BYTE_SWAP	 0x00000004
+#define  MISC_HOST_CTRL_WORD_SWAP	 0x00000008
+#define  MISC_HOST_CTRL_PCISTATE_RW	 0x00000010
+#define  MISC_HOST_CTRL_CLKREG_RW	 0x00000020
+#define  MISC_HOST_CTRL_REGWORD_SWAP	 0x00000040
+#define  MISC_HOST_CTRL_INDIR_ACCESS	 0x00000080
+#define  MISC_HOST_CTRL_IRQ_MASK_MODE	 0x00000100
+#define  MISC_HOST_CTRL_TAGGED_STATUS	 0x00000200
+#define  MISC_HOST_CTRL_CHIPREV		 0xffff0000
+#define  MISC_HOST_CTRL_CHIPREV_SHIFT	 16
+#define  GET_CHIP_REV_ID(MISC_HOST_CTRL) \
+	 (((MISC_HOST_CTRL) & MISC_HOST_CTRL_CHIPREV) >> \
+	  MISC_HOST_CTRL_CHIPREV_SHIFT)
+#define  CHIPREV_ID_5700_A0		 0x7000
+#define  CHIPREV_ID_5700_A1		 0x7001
+#define  CHIPREV_ID_5700_B0		 0x7100
+#define  CHIPREV_ID_5700_B1		 0x7101
+#define  CHIPREV_ID_5700_B3		 0x7102
+#define  CHIPREV_ID_5700_ALTIMA		 0x7104
+#define  CHIPREV_ID_5700_C0		 0x7200
+#define  CHIPREV_ID_5701_A0		 0x0000
+#define  CHIPREV_ID_5701_B0		 0x0100
+#define  CHIPREV_ID_5701_B2		 0x0102
+#define  CHIPREV_ID_5701_B5		 0x0105
+#define  CHIPREV_ID_5703_A0		 0x1000
+#define  CHIPREV_ID_5703_A1		 0x1001
+#define  CHIPREV_ID_5703_A2		 0x1002
+#define  CHIPREV_ID_5703_A3		 0x1003
+#define  CHIPREV_ID_5704_A0		 0x2000
+#define  CHIPREV_ID_5704_A1		 0x2001
+#define  CHIPREV_ID_5704_A2		 0x2002
+#define  CHIPREV_ID_5704_A3		 0x2003
+#define  CHIPREV_ID_5705_A0		 0x3000
+#define  CHIPREV_ID_5705_A1		 0x3001
+#define  CHIPREV_ID_5705_A2		 0x3002
+#define  CHIPREV_ID_5705_A3		 0x3003
+#define  CHIPREV_ID_5750_A0		 0x4000
+#define  CHIPREV_ID_5750_A1		 0x4001
+#define  CHIPREV_ID_5750_A3		 0x4003
+#define  CHIPREV_ID_5750_C2		 0x4202
+#define  CHIPREV_ID_5752_A0_HW		 0x5000
+#define  CHIPREV_ID_5752_A0		 0x6000
+#define  CHIPREV_ID_5752_A1		 0x6001
+#define  CHIPREV_ID_5714_A2		 0x9002
+#define  CHIPREV_ID_5906_A1		 0xc001
+#define  CHIPREV_ID_57780_A0		 0x57780000
+#define  CHIPREV_ID_57780_A1		 0x57780001
+#define  CHIPREV_ID_5717_A0		 0x05717000
+#define  CHIPREV_ID_57765_A0		 0x57785000
+#define  CHIPREV_ID_5719_A0		 0x05719000
+#define  CHIPREV_ID_5720_A0		 0x05720000
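+/* The chip revision ID encodes the ASIC revision in bits 31:12, the
+ * chip revision in bits 31:8 and the metal revision in the low byte;
+ * the macros below extract each field.
+ */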
+#define  GET_ASIC_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 12)
+#define   ASIC_REV_5700			 0x07
+#define   ASIC_REV_5701			 0x00
+#define   ASIC_REV_5703			 0x01
+#define   ASIC_REV_5704			 0x02
+#define   ASIC_REV_5705			 0x03
+#define   ASIC_REV_5750			 0x04
+#define   ASIC_REV_5752			 0x06
+#define   ASIC_REV_5780			 0x08
+#define   ASIC_REV_5714			 0x09
+#define   ASIC_REV_5755			 0x0a
+#define   ASIC_REV_5787			 0x0b
+#define   ASIC_REV_5906			 0x0c
+#define   ASIC_REV_USE_PROD_ID_REG	 0x0f
+#define   ASIC_REV_5784			 0x5784
+#define   ASIC_REV_5761			 0x5761
+#define   ASIC_REV_5785			 0x5785
+#define   ASIC_REV_57780		 0x57780
+#define   ASIC_REV_5717			 0x5717
+#define   ASIC_REV_57765		 0x57785
+#define   ASIC_REV_5719			 0x5719
+#define   ASIC_REV_5720			 0x5720
+#define  GET_CHIP_REV(CHIP_REV_ID)	((CHIP_REV_ID) >> 8)
+#define   CHIPREV_5700_AX		 0x70
+#define   CHIPREV_5700_BX		 0x71
+#define   CHIPREV_5700_CX		 0x72
+#define   CHIPREV_5701_AX		 0x00
+#define   CHIPREV_5703_AX		 0x10
+#define   CHIPREV_5704_AX		 0x20
+#define   CHIPREV_5704_BX		 0x21
+#define   CHIPREV_5750_AX		 0x40
+#define   CHIPREV_5750_BX		 0x41
+#define   CHIPREV_5784_AX		 0x57840
+#define   CHIPREV_5761_AX		 0x57610
+#define   CHIPREV_57765_AX		 0x577650
+#define  GET_METAL_REV(CHIP_REV_ID)	((CHIP_REV_ID) & 0xff)
+#define   METAL_REV_A0			 0x00
+#define   METAL_REV_A1			 0x01
+#define   METAL_REV_B0			 0x00
+#define   METAL_REV_B1			 0x01
+#define   METAL_REV_B2			 0x02
+#define TG3PCI_DMA_RW_CTRL		0x0000006c
+#define  DMA_RWCTRL_DIS_CACHE_ALIGNMENT  0x00000001
+#define  DMA_RWCTRL_TAGGED_STAT_WA	 0x00000080
+#define  DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
+#define  DMA_RWCTRL_READ_BNDRY_MASK	 0x00000700
+#define  DMA_RWCTRL_READ_BNDRY_DISAB	 0x00000000
+#define  DMA_RWCTRL_READ_BNDRY_16	 0x00000100
+#define  DMA_RWCTRL_READ_BNDRY_128_PCIX	 0x00000100
+#define  DMA_RWCTRL_READ_BNDRY_32	 0x00000200
+#define  DMA_RWCTRL_READ_BNDRY_256_PCIX	 0x00000200
+#define  DMA_RWCTRL_READ_BNDRY_64	 0x00000300
+#define  DMA_RWCTRL_READ_BNDRY_384_PCIX	 0x00000300
+#define  DMA_RWCTRL_READ_BNDRY_128	 0x00000400
+#define  DMA_RWCTRL_READ_BNDRY_256	 0x00000500
+#define  DMA_RWCTRL_READ_BNDRY_512	 0x00000600
+#define  DMA_RWCTRL_READ_BNDRY_1024	 0x00000700
+#define  DMA_RWCTRL_WRITE_BNDRY_MASK	 0x00003800
+#define  DMA_RWCTRL_WRITE_BNDRY_DISAB	 0x00000000
+#define  DMA_RWCTRL_WRITE_BNDRY_16	 0x00000800
+#define  DMA_RWCTRL_WRITE_BNDRY_128_PCIX 0x00000800
+#define  DMA_RWCTRL_WRITE_BNDRY_32	 0x00001000
+#define  DMA_RWCTRL_WRITE_BNDRY_256_PCIX 0x00001000
+#define  DMA_RWCTRL_WRITE_BNDRY_64	 0x00001800
+#define  DMA_RWCTRL_WRITE_BNDRY_384_PCIX 0x00001800
+#define  DMA_RWCTRL_WRITE_BNDRY_128	 0x00002000
+#define  DMA_RWCTRL_WRITE_BNDRY_256	 0x00002800
+#define  DMA_RWCTRL_WRITE_BNDRY_512	 0x00003000
+#define  DMA_RWCTRL_WRITE_BNDRY_1024	 0x00003800
+#define  DMA_RWCTRL_ONE_DMA		 0x00004000
+#define  DMA_RWCTRL_READ_WATER		 0x00070000
+#define  DMA_RWCTRL_READ_WATER_SHIFT	 16
+#define  DMA_RWCTRL_WRITE_WATER		 0x00380000
+#define  DMA_RWCTRL_WRITE_WATER_SHIFT	 19
+#define  DMA_RWCTRL_USE_MEM_READ_MULT	 0x00400000
+#define  DMA_RWCTRL_ASSERT_ALL_BE	 0x00800000
+#define  DMA_RWCTRL_PCI_READ_CMD	 0x0f000000
+#define  DMA_RWCTRL_PCI_READ_CMD_SHIFT	 24
+#define  DMA_RWCTRL_PCI_WRITE_CMD	 0xf0000000
+#define  DMA_RWCTRL_PCI_WRITE_CMD_SHIFT	 28
+#define  DMA_RWCTRL_WRITE_BNDRY_64_PCIE	 0x10000000
+#define  DMA_RWCTRL_WRITE_BNDRY_128_PCIE 0x30000000
+#define  DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE 0x70000000
+#define TG3PCI_PCISTATE			0x00000070
+#define  PCISTATE_FORCE_RESET		 0x00000001
+#define  PCISTATE_INT_NOT_ACTIVE	 0x00000002
+#define  PCISTATE_CONV_PCI_MODE		 0x00000004
+#define  PCISTATE_BUS_SPEED_HIGH	 0x00000008
+#define  PCISTATE_BUS_32BIT		 0x00000010
+#define  PCISTATE_ROM_ENABLE		 0x00000020
+#define  PCISTATE_ROM_RETRY_ENABLE	 0x00000040
+#define  PCISTATE_FLAT_VIEW		 0x00000100
+#define  PCISTATE_RETRY_SAME_DMA	 0x00002000
+#define  PCISTATE_ALLOW_APE_CTLSPC_WR	 0x00010000
+#define  PCISTATE_ALLOW_APE_SHMEM_WR	 0x00020000
+#define  PCISTATE_ALLOW_APE_PSPACE_WR	 0x00040000
+#define TG3PCI_CLOCK_CTRL		0x00000074
+#define  CLOCK_CTRL_CORECLK_DISABLE	 0x00000200
+#define  CLOCK_CTRL_RXCLK_DISABLE	 0x00000400
+#define  CLOCK_CTRL_TXCLK_DISABLE	 0x00000800
+#define  CLOCK_CTRL_ALTCLK		 0x00001000
+#define  CLOCK_CTRL_PWRDOWN_PLL133	 0x00008000
+#define  CLOCK_CTRL_44MHZ_CORE		 0x00040000
+#define  CLOCK_CTRL_625_CORE		 0x00100000
+#define  CLOCK_CTRL_FORCE_CLKRUN	 0x00200000
+#define  CLOCK_CTRL_CLKRUN_OENABLE	 0x00400000
+#define  CLOCK_CTRL_DELAY_PCI_GRANT	 0x80000000
+#define TG3PCI_REG_BASE_ADDR		0x00000078
+#define TG3PCI_MEM_WIN_BASE_ADDR	0x0000007c
+#define TG3PCI_REG_DATA			0x00000080
+#define TG3PCI_MEM_WIN_DATA		0x00000084
+#define TG3PCI_MISC_LOCAL_CTRL		0x00000090
+/* 0x94 --> 0x98 unused */
+#define TG3PCI_STD_RING_PROD_IDX	0x00000098 /* 64-bit */
+#define TG3PCI_RCV_RET_RING_CON_IDX	0x000000a0 /* 64-bit */
+/* 0xa8 --> 0xb8 unused */
+#define TG3PCI_DUAL_MAC_CTRL		0x000000b8
+#define  DUAL_MAC_CTRL_CH_MASK		 0x00000003
+#define  DUAL_MAC_CTRL_ID		 0x00000004
+#define TG3PCI_PRODID_ASICREV		0x000000bc
+#define  PROD_ID_ASIC_REV_MASK		 0x0fffffff
+/* 0xc0 --> 0xf4 unused */
+
+#define TG3PCI_GEN2_PRODID_ASICREV	0x000000f4
+#define TG3PCI_GEN15_PRODID_ASICREV	0x000000fc
+/* 0xf8 --> 0x200 unused */
+
+#define TG3_CORR_ERR_STAT		0x00000110
+#define  TG3_CORR_ERR_STAT_CLEAR	0xffffffff
+/* 0x114 --> 0x200 unused */
+
+/* Mailbox registers */
+#define MAILBOX_INTERRUPT_0		0x00000200 /* 64-bit */
+#define MAILBOX_INTERRUPT_1		0x00000208 /* 64-bit */
+#define MAILBOX_INTERRUPT_2		0x00000210 /* 64-bit */
+#define MAILBOX_INTERRUPT_3		0x00000218 /* 64-bit */
+#define MAILBOX_GENERAL_0		0x00000220 /* 64-bit */
+#define MAILBOX_GENERAL_1		0x00000228 /* 64-bit */
+#define MAILBOX_GENERAL_2		0x00000230 /* 64-bit */
+#define MAILBOX_GENERAL_3		0x00000238 /* 64-bit */
+#define MAILBOX_GENERAL_4		0x00000240 /* 64-bit */
+#define MAILBOX_GENERAL_5		0x00000248 /* 64-bit */
+#define MAILBOX_GENERAL_6		0x00000250 /* 64-bit */
+#define MAILBOX_GENERAL_7		0x00000258 /* 64-bit */
+#define MAILBOX_RELOAD_STAT		0x00000260 /* 64-bit */
+#define MAILBOX_RCV_STD_PROD_IDX	0x00000268 /* 64-bit */
+#define TG3_RX_STD_PROD_IDX_REG		(MAILBOX_RCV_STD_PROD_IDX + \
+					 TG3_64BIT_REG_LOW)
+#define MAILBOX_RCV_JUMBO_PROD_IDX	0x00000270 /* 64-bit */
+#define TG3_RX_JMB_PROD_IDX_REG		(MAILBOX_RCV_JUMBO_PROD_IDX + \
+					 TG3_64BIT_REG_LOW)
+#define MAILBOX_RCV_MINI_PROD_IDX	0x00000278 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_0	0x00000280 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_1	0x00000288 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_2	0x00000290 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_3	0x00000298 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_4	0x000002a0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_5	0x000002a8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_6	0x000002b0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_7	0x000002b8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_8	0x000002c0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_9	0x000002c8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_10	0x000002d0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_11	0x000002d8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_12	0x000002e0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_13	0x000002e8 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_14	0x000002f0 /* 64-bit */
+#define MAILBOX_RCVRET_CON_IDX_15	0x000002f8 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_0	0x00000300 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_1	0x00000308 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_2	0x00000310 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_3	0x00000318 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_4	0x00000320 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_5	0x00000328 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_6	0x00000330 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_7	0x00000338 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_8	0x00000340 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_9	0x00000348 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_10	0x00000350 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_11	0x00000358 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_12	0x00000360 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_13	0x00000368 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_14	0x00000370 /* 64-bit */
+#define MAILBOX_SNDHOST_PROD_IDX_15	0x00000378 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_0	0x00000380 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_1	0x00000388 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_2	0x00000390 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_3	0x00000398 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_4	0x000003a0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_5	0x000003a8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_6	0x000003b0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_7	0x000003b8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_8	0x000003c0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_9	0x000003c8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_10	0x000003d0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_11	0x000003d8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_12	0x000003e0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_13	0x000003e8 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_14	0x000003f0 /* 64-bit */
+#define MAILBOX_SNDNIC_PROD_IDX_15	0x000003f8 /* 64-bit */
+
+/* MAC control registers */
+#define MAC_MODE			0x00000400
+#define  MAC_MODE_RESET			 0x00000001
+#define  MAC_MODE_HALF_DUPLEX		 0x00000002
+#define  MAC_MODE_PORT_MODE_MASK	 0x0000000c
+#define  MAC_MODE_PORT_MODE_TBI		 0x0000000c
+#define  MAC_MODE_PORT_MODE_GMII	 0x00000008
+#define  MAC_MODE_PORT_MODE_MII		 0x00000004
+#define  MAC_MODE_PORT_MODE_NONE	 0x00000000
+#define  MAC_MODE_PORT_INT_LPBACK	 0x00000010
+#define  MAC_MODE_TAGGED_MAC_CTRL	 0x00000080
+#define  MAC_MODE_TX_BURSTING		 0x00000100
+#define  MAC_MODE_MAX_DEFER		 0x00000200
+#define  MAC_MODE_LINK_POLARITY		 0x00000400
+#define  MAC_MODE_RXSTAT_ENABLE		 0x00000800
+#define  MAC_MODE_RXSTAT_CLEAR		 0x00001000
+#define  MAC_MODE_RXSTAT_FLUSH		 0x00002000
+#define  MAC_MODE_TXSTAT_ENABLE		 0x00004000
+#define  MAC_MODE_TXSTAT_CLEAR		 0x00008000
+#define  MAC_MODE_TXSTAT_FLUSH		 0x00010000
+#define  MAC_MODE_SEND_CONFIGS		 0x00020000
+#define  MAC_MODE_MAGIC_PKT_ENABLE	 0x00040000
+#define  MAC_MODE_ACPI_ENABLE		 0x00080000
+#define  MAC_MODE_MIP_ENABLE		 0x00100000
+#define  MAC_MODE_TDE_ENABLE		 0x00200000
+#define  MAC_MODE_RDE_ENABLE		 0x00400000
+#define  MAC_MODE_FHDE_ENABLE		 0x00800000
+#define  MAC_MODE_KEEP_FRAME_IN_WOL	 0x01000000
+#define  MAC_MODE_APE_RX_EN		 0x08000000
+#define  MAC_MODE_APE_TX_EN		 0x10000000
+#define MAC_STATUS			0x00000404
+#define  MAC_STATUS_PCS_SYNCED		 0x00000001
+#define  MAC_STATUS_SIGNAL_DET		 0x00000002
+#define  MAC_STATUS_RCVD_CFG		 0x00000004
+#define  MAC_STATUS_CFG_CHANGED		 0x00000008
+#define  MAC_STATUS_SYNC_CHANGED	 0x00000010
+#define  MAC_STATUS_PORT_DEC_ERR	 0x00000400
+#define  MAC_STATUS_LNKSTATE_CHANGED	 0x00001000
+#define  MAC_STATUS_MI_COMPLETION	 0x00400000
+#define  MAC_STATUS_MI_INTERRUPT	 0x00800000
+#define  MAC_STATUS_AP_ERROR		 0x01000000
+#define  MAC_STATUS_ODI_ERROR		 0x02000000
+#define  MAC_STATUS_RXSTAT_OVERRUN	 0x04000000
+#define  MAC_STATUS_TXSTAT_OVERRUN	 0x08000000
+#define MAC_EVENT			0x00000408
+#define  MAC_EVENT_PORT_DECODE_ERR	 0x00000400
+#define  MAC_EVENT_LNKSTATE_CHANGED	 0x00001000
+#define  MAC_EVENT_MI_COMPLETION	 0x00400000
+#define  MAC_EVENT_MI_INTERRUPT		 0x00800000
+#define  MAC_EVENT_AP_ERROR		 0x01000000
+#define  MAC_EVENT_ODI_ERROR		 0x02000000
+#define  MAC_EVENT_RXSTAT_OVERRUN	 0x04000000
+#define  MAC_EVENT_TXSTAT_OVERRUN	 0x08000000
+#define MAC_LED_CTRL			0x0000040c
+#define  LED_CTRL_LNKLED_OVERRIDE	 0x00000001
+#define  LED_CTRL_1000MBPS_ON		 0x00000002
+#define  LED_CTRL_100MBPS_ON		 0x00000004
+#define  LED_CTRL_10MBPS_ON		 0x00000008
+#define  LED_CTRL_TRAFFIC_OVERRIDE	 0x00000010
+#define  LED_CTRL_TRAFFIC_BLINK		 0x00000020
+#define  LED_CTRL_TRAFFIC_LED		 0x00000040
+#define  LED_CTRL_1000MBPS_STATUS	 0x00000080
+#define  LED_CTRL_100MBPS_STATUS	 0x00000100
+#define  LED_CTRL_10MBPS_STATUS		 0x00000200
+#define  LED_CTRL_TRAFFIC_STATUS	 0x00000400
+#define  LED_CTRL_MODE_MAC		 0x00000000
+#define  LED_CTRL_MODE_PHY_1		 0x00000800
+#define  LED_CTRL_MODE_PHY_2		 0x00001000
+#define  LED_CTRL_MODE_SHASTA_MAC	 0x00002000
+#define  LED_CTRL_MODE_SHARED		 0x00004000
+#define  LED_CTRL_MODE_COMBO		 0x00008000
+#define  LED_CTRL_BLINK_RATE_MASK	 0x7ff80000
+#define  LED_CTRL_BLINK_RATE_SHIFT	 19
+#define  LED_CTRL_BLINK_PER_OVERRIDE	 0x00080000
+#define  LED_CTRL_BLINK_RATE_OVERRIDE	 0x80000000
+#define MAC_ADDR_0_HIGH			0x00000410 /* upper 2 bytes */
+#define MAC_ADDR_0_LOW			0x00000414 /* lower 4 bytes */
+#define MAC_ADDR_1_HIGH			0x00000418 /* upper 2 bytes */
+#define MAC_ADDR_1_LOW			0x0000041c /* lower 4 bytes */
+#define MAC_ADDR_2_HIGH			0x00000420 /* upper 2 bytes */
+#define MAC_ADDR_2_LOW			0x00000424 /* lower 4 bytes */
+#define MAC_ADDR_3_HIGH			0x00000428 /* upper 2 bytes */
+#define MAC_ADDR_3_LOW			0x0000042c /* lower 4 bytes */
+#define MAC_ACPI_MBUF_PTR		0x00000430
+#define MAC_ACPI_LEN_OFFSET		0x00000434
+#define  ACPI_LENOFF_LEN_MASK		 0x0000ffff
+#define  ACPI_LENOFF_LEN_SHIFT		 0
+#define  ACPI_LENOFF_OFF_MASK		 0x0fff0000
+#define  ACPI_LENOFF_OFF_SHIFT		 16
+#define MAC_TX_BACKOFF_SEED		0x00000438
+#define  TX_BACKOFF_SEED_MASK		 0x000003ff
+#define MAC_RX_MTU_SIZE			0x0000043c
+#define  RX_MTU_SIZE_MASK		 0x0000ffff
+#define MAC_PCS_TEST			0x00000440
+#define  PCS_TEST_PATTERN_MASK		 0x000fffff
+#define  PCS_TEST_PATTERN_SHIFT		 0
+#define  PCS_TEST_ENABLE		 0x00100000
+#define MAC_TX_AUTO_NEG			0x00000444
+#define  TX_AUTO_NEG_MASK		 0x0000ffff
+#define  TX_AUTO_NEG_SHIFT		 0
+#define MAC_RX_AUTO_NEG			0x00000448
+#define  RX_AUTO_NEG_MASK		 0x0000ffff
+#define  RX_AUTO_NEG_SHIFT		 0
+#define MAC_MI_COM			0x0000044c
+#define  MI_COM_CMD_MASK		 0x0c000000
+#define  MI_COM_CMD_WRITE		 0x04000000
+#define  MI_COM_CMD_READ		 0x08000000
+#define  MI_COM_READ_FAILED		 0x10000000
+#define  MI_COM_START			 0x20000000
+#define  MI_COM_BUSY			 0x20000000
+#define  MI_COM_PHY_ADDR_MASK		 0x03e00000
+#define  MI_COM_PHY_ADDR_SHIFT		 21
+#define  MI_COM_REG_ADDR_MASK		 0x001f0000
+#define  MI_COM_REG_ADDR_SHIFT		 16
+#define  MI_COM_DATA_MASK		 0x0000ffff
+#define MAC_MI_STAT			0x00000450
+#define  MAC_MI_STAT_LNKSTAT_ATTN_ENAB	 0x00000001
+#define  MAC_MI_STAT_10MBPS_MODE	 0x00000002
+#define MAC_MI_MODE			0x00000454
+#define  MAC_MI_MODE_CLK_10MHZ		 0x00000001
+#define  MAC_MI_MODE_SHORT_PREAMBLE	 0x00000002
+#define  MAC_MI_MODE_AUTO_POLL		 0x00000010
+#define  MAC_MI_MODE_500KHZ_CONST	 0x00008000
+#define  MAC_MI_MODE_BASE		 0x000c0000 /* XXX magic values XXX */
+#define MAC_AUTO_POLL_STATUS		0x00000458
+#define  MAC_AUTO_POLL_ERROR		 0x00000001
+#define MAC_TX_MODE			0x0000045c
+#define  TX_MODE_RESET			 0x00000001
+#define  TX_MODE_ENABLE			 0x00000002
+#define  TX_MODE_FLOW_CTRL_ENABLE	 0x00000010
+#define  TX_MODE_BIG_BCKOFF_ENABLE	 0x00000020
+#define  TX_MODE_LONG_PAUSE_ENABLE	 0x00000040
+#define  TX_MODE_MBUF_LOCKUP_FIX	 0x00000100
+#define  TX_MODE_JMB_FRM_LEN		 0x00400000
+#define  TX_MODE_CNT_DN_MODE		 0x00800000
+#define MAC_TX_STATUS			0x00000460
+#define  TX_STATUS_XOFFED		 0x00000001
+#define  TX_STATUS_SENT_XOFF		 0x00000002
+#define  TX_STATUS_SENT_XON		 0x00000004
+#define  TX_STATUS_LINK_UP		 0x00000008
+#define  TX_STATUS_ODI_UNDERRUN		 0x00000010
+#define  TX_STATUS_ODI_OVERRUN		 0x00000020
+#define MAC_TX_LENGTHS			0x00000464
+#define  TX_LENGTHS_SLOT_TIME_MASK	 0x000000ff
+#define  TX_LENGTHS_SLOT_TIME_SHIFT	 0
+#define  TX_LENGTHS_IPG_MASK		 0x00000f00
+#define  TX_LENGTHS_IPG_SHIFT		 8
+#define  TX_LENGTHS_IPG_CRS_MASK	 0x00003000
+#define  TX_LENGTHS_IPG_CRS_SHIFT	 12
+#define  TX_LENGTHS_JMB_FRM_LEN_MSK	 0x00ff0000
+#define  TX_LENGTHS_CNT_DWN_VAL_MSK	 0xff000000
+#define MAC_RX_MODE			0x00000468
+#define  RX_MODE_RESET			 0x00000001
+#define  RX_MODE_ENABLE			 0x00000002
+#define  RX_MODE_FLOW_CTRL_ENABLE	 0x00000004
+#define  RX_MODE_KEEP_MAC_CTRL		 0x00000008
+#define  RX_MODE_KEEP_PAUSE		 0x00000010
+#define  RX_MODE_ACCEPT_OVERSIZED	 0x00000020
+#define  RX_MODE_ACCEPT_RUNTS		 0x00000040
+#define  RX_MODE_LEN_CHECK		 0x00000080
+#define  RX_MODE_PROMISC		 0x00000100
+#define  RX_MODE_NO_CRC_CHECK		 0x00000200
+#define  RX_MODE_KEEP_VLAN_TAG		 0x00000400
+#define  RX_MODE_RSS_IPV4_HASH_EN	 0x00010000
+#define  RX_MODE_RSS_TCP_IPV4_HASH_EN	 0x00020000
+#define  RX_MODE_RSS_IPV6_HASH_EN	 0x00040000
+#define  RX_MODE_RSS_TCP_IPV6_HASH_EN	 0x00080000
+#define  RX_MODE_RSS_ITBL_HASH_BITS_7	 0x00700000
+#define  RX_MODE_RSS_ENABLE		 0x00800000
+#define  RX_MODE_IPV6_CSUM_ENABLE	 0x01000000
+#define MAC_RX_STATUS			0x0000046c
+#define  RX_STATUS_REMOTE_TX_XOFFED	 0x00000001
+#define  RX_STATUS_XOFF_RCVD		 0x00000002
+#define  RX_STATUS_XON_RCVD		 0x00000004
+#define MAC_HASH_REG_0			0x00000470
+#define MAC_HASH_REG_1			0x00000474
+#define MAC_HASH_REG_2			0x00000478
+#define MAC_HASH_REG_3			0x0000047c
+#define MAC_RCV_RULE_0			0x00000480
+#define MAC_RCV_VALUE_0			0x00000484
+#define MAC_RCV_RULE_1			0x00000488
+#define MAC_RCV_VALUE_1			0x0000048c
+#define MAC_RCV_RULE_2			0x00000490
+#define MAC_RCV_VALUE_2			0x00000494
+#define MAC_RCV_RULE_3			0x00000498
+#define MAC_RCV_VALUE_3			0x0000049c
+#define MAC_RCV_RULE_4			0x000004a0
+#define MAC_RCV_VALUE_4			0x000004a4
+#define MAC_RCV_RULE_5			0x000004a8
+#define MAC_RCV_VALUE_5			0x000004ac
+#define MAC_RCV_RULE_6			0x000004b0
+#define MAC_RCV_VALUE_6			0x000004b4
+#define MAC_RCV_RULE_7			0x000004b8
+#define MAC_RCV_VALUE_7			0x000004bc
+#define MAC_RCV_RULE_8			0x000004c0
+#define MAC_RCV_VALUE_8			0x000004c4
+#define MAC_RCV_RULE_9			0x000004c8
+#define MAC_RCV_VALUE_9			0x000004cc
+#define MAC_RCV_RULE_10			0x000004d0
+#define MAC_RCV_VALUE_10		0x000004d4
+#define MAC_RCV_RULE_11			0x000004d8
+#define MAC_RCV_VALUE_11		0x000004dc
+#define MAC_RCV_RULE_12			0x000004e0
+#define MAC_RCV_VALUE_12		0x000004e4
+#define MAC_RCV_RULE_13			0x000004e8
+#define MAC_RCV_VALUE_13		0x000004ec
+#define MAC_RCV_RULE_14			0x000004f0
+#define MAC_RCV_VALUE_14		0x000004f4
+#define MAC_RCV_RULE_15			0x000004f8
+#define MAC_RCV_VALUE_15		0x000004fc
+#define  RCV_RULE_DISABLE_MASK		 0x7fffffff
+#define MAC_RCV_RULE_CFG		0x00000500
+#define  RCV_RULE_CFG_DEFAULT_CLASS	0x00000008
+#define MAC_LOW_WMARK_MAX_RX_FRAME	0x00000504
+/* 0x508 --> 0x520 unused */
+#define MAC_HASHREGU_0			0x00000520
+#define MAC_HASHREGU_1			0x00000524
+#define MAC_HASHREGU_2			0x00000528
+#define MAC_HASHREGU_3			0x0000052c
+#define MAC_EXTADDR_0_HIGH		0x00000530
+#define MAC_EXTADDR_0_LOW		0x00000534
+#define MAC_EXTADDR_1_HIGH		0x00000538
+#define MAC_EXTADDR_1_LOW		0x0000053c
+#define MAC_EXTADDR_2_HIGH		0x00000540
+#define MAC_EXTADDR_2_LOW		0x00000544
+#define MAC_EXTADDR_3_HIGH		0x00000548
+#define MAC_EXTADDR_3_LOW		0x0000054c
+#define MAC_EXTADDR_4_HIGH		0x00000550
+#define MAC_EXTADDR_4_LOW		0x00000554
+#define MAC_EXTADDR_5_HIGH		0x00000558
+#define MAC_EXTADDR_5_LOW		0x0000055c
+#define MAC_EXTADDR_6_HIGH		0x00000560
+#define MAC_EXTADDR_6_LOW		0x00000564
+#define MAC_EXTADDR_7_HIGH		0x00000568
+#define MAC_EXTADDR_7_LOW		0x0000056c
+#define MAC_EXTADDR_8_HIGH		0x00000570
+#define MAC_EXTADDR_8_LOW		0x00000574
+#define MAC_EXTADDR_9_HIGH		0x00000578
+#define MAC_EXTADDR_9_LOW		0x0000057c
+#define MAC_EXTADDR_10_HIGH		0x00000580
+#define MAC_EXTADDR_10_LOW		0x00000584
+#define MAC_EXTADDR_11_HIGH		0x00000588
+#define MAC_EXTADDR_11_LOW		0x0000058c
+#define MAC_SERDES_CFG			0x00000590
+#define  MAC_SERDES_CFG_EDGE_SELECT	 0x00001000
+#define MAC_SERDES_STAT			0x00000594
+/* 0x598 --> 0x5a0 unused */
+#define MAC_PHYCFG1			0x000005a0
+#define  MAC_PHYCFG1_RGMII_INT		 0x00000001
+#define  MAC_PHYCFG1_RXCLK_TO_MASK	 0x00001ff0
+#define  MAC_PHYCFG1_RXCLK_TIMEOUT	 0x00001000
+#define  MAC_PHYCFG1_TXCLK_TO_MASK	 0x01ff0000
+#define  MAC_PHYCFG1_TXCLK_TIMEOUT	 0x01000000
+#define  MAC_PHYCFG1_RGMII_EXT_RX_DEC	 0x02000000
+#define  MAC_PHYCFG1_RGMII_SND_STAT_EN	 0x04000000
+#define  MAC_PHYCFG1_TXC_DRV		 0x20000000
+#define MAC_PHYCFG2			0x000005a4
+#define  MAC_PHYCFG2_INBAND_ENABLE	 0x00000001
+#define  MAC_PHYCFG2_EMODE_MASK_MASK	 0x000001c0
+#define  MAC_PHYCFG2_EMODE_MASK_AC131	 0x000000c0
+#define  MAC_PHYCFG2_EMODE_MASK_50610	 0x00000100
+#define  MAC_PHYCFG2_EMODE_MASK_RT8211	 0x00000000
+#define  MAC_PHYCFG2_EMODE_MASK_RT8201	 0x000001c0
+#define  MAC_PHYCFG2_EMODE_COMP_MASK	 0x00000e00
+#define  MAC_PHYCFG2_EMODE_COMP_AC131	 0x00000600
+#define  MAC_PHYCFG2_EMODE_COMP_50610	 0x00000400
+#define  MAC_PHYCFG2_EMODE_COMP_RT8211	 0x00000800
+#define  MAC_PHYCFG2_EMODE_COMP_RT8201	 0x00000000
+#define  MAC_PHYCFG2_FMODE_MASK_MASK	 0x00007000
+#define  MAC_PHYCFG2_FMODE_MASK_AC131	 0x00006000
+#define  MAC_PHYCFG2_FMODE_MASK_50610	 0x00004000
+#define  MAC_PHYCFG2_FMODE_MASK_RT8211	 0x00000000
+#define  MAC_PHYCFG2_FMODE_MASK_RT8201	 0x00007000
+#define  MAC_PHYCFG2_FMODE_COMP_MASK	 0x00038000
+#define  MAC_PHYCFG2_FMODE_COMP_AC131	 0x00030000
+#define  MAC_PHYCFG2_FMODE_COMP_50610	 0x00008000
+#define  MAC_PHYCFG2_FMODE_COMP_RT8211	 0x00038000
+#define  MAC_PHYCFG2_FMODE_COMP_RT8201	 0x00000000
+#define  MAC_PHYCFG2_GMODE_MASK_MASK	 0x001c0000
+#define  MAC_PHYCFG2_GMODE_MASK_AC131	 0x001c0000
+#define  MAC_PHYCFG2_GMODE_MASK_50610	 0x00100000
+#define  MAC_PHYCFG2_GMODE_MASK_RT8211	 0x00000000
+#define  MAC_PHYCFG2_GMODE_MASK_RT8201	 0x001c0000
+#define  MAC_PHYCFG2_GMODE_COMP_MASK	 0x00e00000
+#define  MAC_PHYCFG2_GMODE_COMP_AC131	 0x00e00000
+#define  MAC_PHYCFG2_GMODE_COMP_50610	 0x00000000
+#define  MAC_PHYCFG2_GMODE_COMP_RT8211	 0x00200000
+#define  MAC_PHYCFG2_GMODE_COMP_RT8201	 0x00000000
+#define  MAC_PHYCFG2_ACT_MASK_MASK	 0x03000000
+#define  MAC_PHYCFG2_ACT_MASK_AC131	 0x03000000
+#define  MAC_PHYCFG2_ACT_MASK_50610	 0x01000000
+#define  MAC_PHYCFG2_ACT_MASK_RT8211	 0x03000000
+#define  MAC_PHYCFG2_ACT_MASK_RT8201	 0x01000000
+#define  MAC_PHYCFG2_ACT_COMP_MASK	 0x0c000000
+#define  MAC_PHYCFG2_ACT_COMP_AC131	 0x00000000
+#define  MAC_PHYCFG2_ACT_COMP_50610	 0x00000000
+#define  MAC_PHYCFG2_ACT_COMP_RT8211	 0x00000000
+#define  MAC_PHYCFG2_ACT_COMP_RT8201	 0x08000000
+#define  MAC_PHYCFG2_QUAL_MASK_MASK	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_AC131	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_50610	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_RT8211	 0x30000000
+#define  MAC_PHYCFG2_QUAL_MASK_RT8201	 0x30000000
+#define  MAC_PHYCFG2_QUAL_COMP_MASK	 0xc0000000
+#define  MAC_PHYCFG2_QUAL_COMP_AC131	 0x00000000
+#define  MAC_PHYCFG2_QUAL_COMP_50610	 0x00000000
+#define  MAC_PHYCFG2_QUAL_COMP_RT8211	 0x00000000
+#define  MAC_PHYCFG2_QUAL_COMP_RT8201	 0x00000000
+#define MAC_PHYCFG2_50610_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_50610 | \
+	 MAC_PHYCFG2_EMODE_COMP_50610 | \
+	 MAC_PHYCFG2_FMODE_MASK_50610 | \
+	 MAC_PHYCFG2_FMODE_COMP_50610 | \
+	 MAC_PHYCFG2_GMODE_MASK_50610 | \
+	 MAC_PHYCFG2_GMODE_COMP_50610 | \
+	 MAC_PHYCFG2_ACT_MASK_50610 | \
+	 MAC_PHYCFG2_ACT_COMP_50610 | \
+	 MAC_PHYCFG2_QUAL_MASK_50610 | \
+	 MAC_PHYCFG2_QUAL_COMP_50610)
+#define MAC_PHYCFG2_AC131_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_EMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_FMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_FMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_GMODE_MASK_AC131 | \
+	 MAC_PHYCFG2_GMODE_COMP_AC131 | \
+	 MAC_PHYCFG2_ACT_MASK_AC131 | \
+	 MAC_PHYCFG2_ACT_COMP_AC131 | \
+	 MAC_PHYCFG2_QUAL_MASK_AC131 | \
+	 MAC_PHYCFG2_QUAL_COMP_AC131)
+#define MAC_PHYCFG2_RTL8211C_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_EMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_FMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_FMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_GMODE_MASK_RT8211 | \
+	 MAC_PHYCFG2_GMODE_COMP_RT8211 | \
+	 MAC_PHYCFG2_ACT_MASK_RT8211 | \
+	 MAC_PHYCFG2_ACT_COMP_RT8211 | \
+	 MAC_PHYCFG2_QUAL_MASK_RT8211 | \
+	 MAC_PHYCFG2_QUAL_COMP_RT8211)
+#define MAC_PHYCFG2_RTL8201E_LED_MODES \
+	(MAC_PHYCFG2_EMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_EMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_FMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_FMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_GMODE_MASK_RT8201 | \
+	 MAC_PHYCFG2_GMODE_COMP_RT8201 | \
+	 MAC_PHYCFG2_ACT_MASK_RT8201 | \
+	 MAC_PHYCFG2_ACT_COMP_RT8201 | \
+	 MAC_PHYCFG2_QUAL_MASK_RT8201 | \
+	 MAC_PHYCFG2_QUAL_COMP_RT8201)
+#define MAC_EXT_RGMII_MODE		0x000005a8
+#define  MAC_RGMII_MODE_TX_ENABLE	 0x00000001
+#define  MAC_RGMII_MODE_TX_LOWPWR	 0x00000002
+#define  MAC_RGMII_MODE_TX_RESET	 0x00000004
+#define  MAC_RGMII_MODE_RX_INT_B	 0x00000100
+#define  MAC_RGMII_MODE_RX_QUALITY	 0x00000200
+#define  MAC_RGMII_MODE_RX_ACTIVITY	 0x00000400
+#define  MAC_RGMII_MODE_RX_ENG_DET	 0x00000800
+/* 0x5ac --> 0x5b0 unused */
+#define SERDES_RX_CTRL			0x000005b0	/* 5780/5714 only */
+#define  SERDES_RX_SIG_DETECT		 0x00000400
+#define SG_DIG_CTRL			0x000005b0
+#define  SG_DIG_USING_HW_AUTONEG	 0x80000000
+#define  SG_DIG_SOFT_RESET		 0x40000000
+#define  SG_DIG_DISABLE_LINKRDY		 0x20000000
+#define  SG_DIG_CRC16_CLEAR_N		 0x01000000
+#define  SG_DIG_EN10B			 0x00800000
+#define  SG_DIG_CLEAR_STATUS		 0x00400000
+#define  SG_DIG_LOCAL_DUPLEX_STATUS	 0x00200000
+#define  SG_DIG_LOCAL_LINK_STATUS	 0x00100000
+#define  SG_DIG_SPEED_STATUS_MASK	 0x000c0000
+#define  SG_DIG_SPEED_STATUS_SHIFT	 18
+#define  SG_DIG_JUMBO_PACKET_DISABLE	 0x00020000
+#define  SG_DIG_RESTART_AUTONEG		 0x00010000
+#define  SG_DIG_FIBER_MODE		 0x00008000
+#define  SG_DIG_REMOTE_FAULT_MASK	 0x00006000
+#define  SG_DIG_PAUSE_MASK		 0x00001800
+#define  SG_DIG_PAUSE_CAP		 0x00000800
+#define  SG_DIG_ASYM_PAUSE		 0x00001000
+#define  SG_DIG_GBIC_ENABLE		 0x00000400
+#define  SG_DIG_CHECK_END_ENABLE	 0x00000200
+#define  SG_DIG_SGMII_AUTONEG_TIMER	 0x00000100
+#define  SG_DIG_CLOCK_PHASE_SELECT	 0x00000080
+#define  SG_DIG_GMII_INPUT_SELECT	 0x00000040
+#define  SG_DIG_MRADV_CRC16_SELECT	 0x00000020
+#define  SG_DIG_COMMA_DETECT_ENABLE	 0x00000010
+#define  SG_DIG_AUTONEG_TIMER_REDUCE	 0x00000008
+#define  SG_DIG_AUTONEG_LOW_ENABLE	 0x00000004
+#define  SG_DIG_REMOTE_LOOPBACK		 0x00000002
+#define  SG_DIG_LOOPBACK		 0x00000001
+#define  SG_DIG_COMMON_SETUP (SG_DIG_CRC16_CLEAR_N | \
+			      SG_DIG_LOCAL_DUPLEX_STATUS | \
+			      SG_DIG_LOCAL_LINK_STATUS | \
+			      (0x2 << SG_DIG_SPEED_STATUS_SHIFT) | \
+			      SG_DIG_FIBER_MODE | SG_DIG_GBIC_ENABLE)
+#define SG_DIG_STATUS			0x000005b4
+#define  SG_DIG_CRC16_BUS_MASK		 0xffff0000
+#define  SG_DIG_PARTNER_FAULT_MASK	 0x00600000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_ASYM_PAUSE	 0x00100000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_PAUSE_CAPABLE	 0x00080000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_HALF_DUPLEX	 0x00040000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_FULL_DUPLEX	 0x00020000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_PARTNER_NEXT_PAGE	 0x00010000 /* If !MRADV_CRC16_SELECT */
+#define  SG_DIG_AUTONEG_STATE_MASK	 0x00000ff0
+#define  SG_DIG_IS_SERDES		 0x00000100
+#define  SG_DIG_COMMA_DETECTOR		 0x00000008
+#define  SG_DIG_MAC_ACK_STATUS		 0x00000004
+#define  SG_DIG_AUTONEG_COMPLETE	 0x00000002
+#define  SG_DIG_AUTONEG_ERROR		 0x00000001
+/* 0x5b8 --> 0x600 unused */
+#define MAC_TX_MAC_STATE_BASE		0x00000600 /* 16 bytes */
+#define MAC_RX_MAC_STATE_BASE		0x00000610 /* 20 bytes */
+/* 0x624 --> 0x670 unused */
+
+#define MAC_RSS_INDIR_TBL_0		0x00000630
+
+#define MAC_RSS_HASH_KEY_0		0x00000670
+#define MAC_RSS_HASH_KEY_1		0x00000674
+#define MAC_RSS_HASH_KEY_2		0x00000678
+#define MAC_RSS_HASH_KEY_3		0x0000067c
+#define MAC_RSS_HASH_KEY_4		0x00000680
+#define MAC_RSS_HASH_KEY_5		0x00000684
+#define MAC_RSS_HASH_KEY_6		0x00000688
+#define MAC_RSS_HASH_KEY_7		0x0000068c
+#define MAC_RSS_HASH_KEY_8		0x00000690
+#define MAC_RSS_HASH_KEY_9		0x00000694
+/* 0x698 --> 0x800 unused */
+
+#define MAC_TX_STATS_OCTETS		0x00000800
+#define MAC_TX_STATS_RESV1		0x00000804
+#define MAC_TX_STATS_COLLISIONS		0x00000808
+#define MAC_TX_STATS_XON_SENT		0x0000080c
+#define MAC_TX_STATS_XOFF_SENT		0x00000810
+#define MAC_TX_STATS_RESV2		0x00000814
+#define MAC_TX_STATS_MAC_ERRORS		0x00000818
+#define MAC_TX_STATS_SINGLE_COLLISIONS	0x0000081c
+#define MAC_TX_STATS_MULT_COLLISIONS	0x00000820
+#define MAC_TX_STATS_DEFERRED		0x00000824
+#define MAC_TX_STATS_RESV3		0x00000828
+#define MAC_TX_STATS_EXCESSIVE_COL	0x0000082c
+#define MAC_TX_STATS_LATE_COL		0x00000830
+#define MAC_TX_STATS_RESV4_1		0x00000834
+#define MAC_TX_STATS_RESV4_2		0x00000838
+#define MAC_TX_STATS_RESV4_3		0x0000083c
+#define MAC_TX_STATS_RESV4_4		0x00000840
+#define MAC_TX_STATS_RESV4_5		0x00000844
+#define MAC_TX_STATS_RESV4_6		0x00000848
+#define MAC_TX_STATS_RESV4_7		0x0000084c
+#define MAC_TX_STATS_RESV4_8		0x00000850
+#define MAC_TX_STATS_RESV4_9		0x00000854
+#define MAC_TX_STATS_RESV4_10		0x00000858
+#define MAC_TX_STATS_RESV4_11		0x0000085c
+#define MAC_TX_STATS_RESV4_12		0x00000860
+#define MAC_TX_STATS_RESV4_13		0x00000864
+#define MAC_TX_STATS_RESV4_14		0x00000868
+#define MAC_TX_STATS_UCAST		0x0000086c
+#define MAC_TX_STATS_MCAST		0x00000870
+#define MAC_TX_STATS_BCAST		0x00000874
+#define MAC_TX_STATS_RESV5_1		0x00000878
+#define MAC_TX_STATS_RESV5_2		0x0000087c
+#define MAC_RX_STATS_OCTETS		0x00000880
+#define MAC_RX_STATS_RESV1		0x00000884
+#define MAC_RX_STATS_FRAGMENTS		0x00000888
+#define MAC_RX_STATS_UCAST		0x0000088c
+#define MAC_RX_STATS_MCAST		0x00000890
+#define MAC_RX_STATS_BCAST		0x00000894
+#define MAC_RX_STATS_FCS_ERRORS		0x00000898
+#define MAC_RX_STATS_ALIGN_ERRORS	0x0000089c
+#define MAC_RX_STATS_XON_PAUSE_RECVD	0x000008a0
+#define MAC_RX_STATS_XOFF_PAUSE_RECVD	0x000008a4
+#define MAC_RX_STATS_MAC_CTRL_RECVD	0x000008a8
+#define MAC_RX_STATS_XOFF_ENTERED	0x000008ac
+#define MAC_RX_STATS_FRAME_TOO_LONG	0x000008b0
+#define MAC_RX_STATS_JABBERS		0x000008b4
+#define MAC_RX_STATS_UNDERSIZE		0x000008b8
+/* 0x8bc --> 0xc00 unused */
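+
+/*
+ * Editorial sketch, not part of the original file: the MAC statistics
+ * registers above are plain 32-bit counters, read with the driver's
+ * tr32() accessor; the u64 accumulators here are hypothetical.
+ */
+static inline void tg3_example_harvest_rx_stats(struct tg3 *tp,
+						u64 *octets, u64 *fcs_errs)
+{
+	*octets += tr32(MAC_RX_STATS_OCTETS);
+	*fcs_errs += tr32(MAC_RX_STATS_FCS_ERRORS);
+}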
+
+/* Send data initiator control registers */
+#define SNDDATAI_MODE			0x00000c00
+#define  SNDDATAI_MODE_RESET		 0x00000001
+#define  SNDDATAI_MODE_ENABLE		 0x00000002
+#define  SNDDATAI_MODE_STAT_OFLOW_ENAB	 0x00000004
+#define SNDDATAI_STATUS			0x00000c04
+#define  SNDDATAI_STATUS_STAT_OFLOW	 0x00000004
+#define SNDDATAI_STATSCTRL		0x00000c08
+#define  SNDDATAI_SCTRL_ENABLE		 0x00000001
+#define  SNDDATAI_SCTRL_FASTUPD		 0x00000002
+#define  SNDDATAI_SCTRL_CLEAR		 0x00000004
+#define  SNDDATAI_SCTRL_FLUSH		 0x00000008
+#define  SNDDATAI_SCTRL_FORCE_ZERO	 0x00000010
+#define SNDDATAI_STATSENAB		0x00000c0c
+#define SNDDATAI_STATSINCMASK		0x00000c10
+#define ISO_PKT_TX			0x00000c20
+/* 0xc24 --> 0xc80 unused */
+#define SNDDATAI_COS_CNT_0		0x00000c80
+#define SNDDATAI_COS_CNT_1		0x00000c84
+#define SNDDATAI_COS_CNT_2		0x00000c88
+#define SNDDATAI_COS_CNT_3		0x00000c8c
+#define SNDDATAI_COS_CNT_4		0x00000c90
+#define SNDDATAI_COS_CNT_5		0x00000c94
+#define SNDDATAI_COS_CNT_6		0x00000c98
+#define SNDDATAI_COS_CNT_7		0x00000c9c
+#define SNDDATAI_COS_CNT_8		0x00000ca0
+#define SNDDATAI_COS_CNT_9		0x00000ca4
+#define SNDDATAI_COS_CNT_10		0x00000ca8
+#define SNDDATAI_COS_CNT_11		0x00000cac
+#define SNDDATAI_COS_CNT_12		0x00000cb0
+#define SNDDATAI_COS_CNT_13		0x00000cb4
+#define SNDDATAI_COS_CNT_14		0x00000cb8
+#define SNDDATAI_COS_CNT_15		0x00000cbc
+#define SNDDATAI_DMA_RDQ_FULL_CNT	0x00000cc0
+#define SNDDATAI_DMA_PRIO_RDQ_FULL_CNT	0x00000cc4
+#define SNDDATAI_SDCQ_FULL_CNT		0x00000cc8
+#define SNDDATAI_NICRNG_SSND_PIDX_CNT	0x00000ccc
+#define SNDDATAI_STATS_UPDATED_CNT	0x00000cd0
+#define SNDDATAI_INTERRUPTS_CNT		0x00000cd4
+#define SNDDATAI_AVOID_INTERRUPTS_CNT	0x00000cd8
+#define SNDDATAI_SND_THRESH_HIT_CNT	0x00000cdc
+/* 0xce0 --> 0x1000 unused */
+
+/* Send data completion control registers */
+#define SNDDATAC_MODE			0x00001000
+#define  SNDDATAC_MODE_RESET		 0x00000001
+#define  SNDDATAC_MODE_ENABLE		 0x00000002
+#define  SNDDATAC_MODE_CDELAY		 0x00000010
+/* 0x1004 --> 0x1400 unused */
+
+/* Send BD ring selector */
+#define SNDBDS_MODE			0x00001400
+#define  SNDBDS_MODE_RESET		 0x00000001
+#define  SNDBDS_MODE_ENABLE		 0x00000002
+#define  SNDBDS_MODE_ATTN_ENABLE	 0x00000004
+#define SNDBDS_STATUS			0x00001404
+#define  SNDBDS_STATUS_ERROR_ATTN	 0x00000004
+#define SNDBDS_HWDIAG			0x00001408
+/* 0x140c --> 0x1440 unused */
+#define SNDBDS_SEL_CON_IDX_0		0x00001440
+#define SNDBDS_SEL_CON_IDX_1		0x00001444
+#define SNDBDS_SEL_CON_IDX_2		0x00001448
+#define SNDBDS_SEL_CON_IDX_3		0x0000144c
+#define SNDBDS_SEL_CON_IDX_4		0x00001450
+#define SNDBDS_SEL_CON_IDX_5		0x00001454
+#define SNDBDS_SEL_CON_IDX_6		0x00001458
+#define SNDBDS_SEL_CON_IDX_7		0x0000145c
+#define SNDBDS_SEL_CON_IDX_8		0x00001460
+#define SNDBDS_SEL_CON_IDX_9		0x00001464
+#define SNDBDS_SEL_CON_IDX_10		0x00001468
+#define SNDBDS_SEL_CON_IDX_11		0x0000146c
+#define SNDBDS_SEL_CON_IDX_12		0x00001470
+#define SNDBDS_SEL_CON_IDX_13		0x00001474
+#define SNDBDS_SEL_CON_IDX_14		0x00001478
+#define SNDBDS_SEL_CON_IDX_15		0x0000147c
+/* 0x1480 --> 0x1800 unused */
+
+/* Send BD initiator control registers */
+#define SNDBDI_MODE			0x00001800
+#define  SNDBDI_MODE_RESET		 0x00000001
+#define  SNDBDI_MODE_ENABLE		 0x00000002
+#define  SNDBDI_MODE_ATTN_ENABLE	 0x00000004
+#define  SNDBDI_MODE_MULTI_TXQ_EN	 0x00000020
+#define SNDBDI_STATUS			0x00001804
+#define  SNDBDI_STATUS_ERROR_ATTN	 0x00000004
+#define SNDBDI_IN_PROD_IDX_0		0x00001808
+#define SNDBDI_IN_PROD_IDX_1		0x0000180c
+#define SNDBDI_IN_PROD_IDX_2		0x00001810
+#define SNDBDI_IN_PROD_IDX_3		0x00001814
+#define SNDBDI_IN_PROD_IDX_4		0x00001818
+#define SNDBDI_IN_PROD_IDX_5		0x0000181c
+#define SNDBDI_IN_PROD_IDX_6		0x00001820
+#define SNDBDI_IN_PROD_IDX_7		0x00001824
+#define SNDBDI_IN_PROD_IDX_8		0x00001828
+#define SNDBDI_IN_PROD_IDX_9		0x0000182c
+#define SNDBDI_IN_PROD_IDX_10		0x00001830
+#define SNDBDI_IN_PROD_IDX_11		0x00001834
+#define SNDBDI_IN_PROD_IDX_12		0x00001838
+#define SNDBDI_IN_PROD_IDX_13		0x0000183c
+#define SNDBDI_IN_PROD_IDX_14		0x00001840
+#define SNDBDI_IN_PROD_IDX_15		0x00001844
+/* 0x1848 --> 0x1c00 unused */
+
+/* Send BD completion control registers */
+#define SNDBDC_MODE			0x00001c00
+#define  SNDBDC_MODE_RESET		 0x00000001
+#define  SNDBDC_MODE_ENABLE		 0x00000002
+#define  SNDBDC_MODE_ATTN_ENABLE	 0x00000004
+/* 0x1c04 --> 0x2000 unused */
+
+/* Receive list placement control registers */
+#define RCVLPC_MODE			0x00002000
+#define  RCVLPC_MODE_RESET		 0x00000001
+#define  RCVLPC_MODE_ENABLE		 0x00000002
+#define  RCVLPC_MODE_CLASS0_ATTN_ENAB	 0x00000004
+#define  RCVLPC_MODE_MAPOOR_AATTN_ENAB	 0x00000008
+#define  RCVLPC_MODE_STAT_OFLOW_ENAB	 0x00000010
+#define RCVLPC_STATUS			0x00002004
+#define  RCVLPC_STATUS_CLASS0		 0x00000004
+#define  RCVLPC_STATUS_MAPOOR		 0x00000008
+#define  RCVLPC_STATUS_STAT_OFLOW	 0x00000010
+#define RCVLPC_LOCK			0x00002008
+#define  RCVLPC_LOCK_REQ_MASK		 0x0000ffff
+#define  RCVLPC_LOCK_REQ_SHIFT		 0
+#define  RCVLPC_LOCK_GRANT_MASK		 0xffff0000
+#define  RCVLPC_LOCK_GRANT_SHIFT	 16
+#define RCVLPC_NON_EMPTY_BITS		0x0000200c
+#define  RCVLPC_NON_EMPTY_BITS_MASK	 0x0000ffff
+#define RCVLPC_CONFIG			0x00002010
+#define RCVLPC_STATSCTRL		0x00002014
+#define  RCVLPC_STATSCTRL_ENABLE	 0x00000001
+#define  RCVLPC_STATSCTRL_FASTUPD	 0x00000002
+#define RCVLPC_STATS_ENABLE		0x00002018
+#define  RCVLPC_STATSENAB_ASF_FIX	 0x00000002
+#define  RCVLPC_STATSENAB_DACK_FIX	 0x00040000
+#define  RCVLPC_STATSENAB_LNGBRST_RFIX	 0x00400000
+#define RCVLPC_STATS_INCMASK		0x0000201c
+/* 0x2020 --> 0x2100 unused */
+#define RCVLPC_SELLST_BASE		0x00002100 /* 16 16-byte entries */
+#define  SELLST_TAIL			0x00000004
+#define  SELLST_CONT			0x00000008
+#define  SELLST_UNUSED			0x0000000c
+#define RCVLPC_COS_CNTL_BASE		0x00002200 /* 16 4-byte entries */
+#define RCVLPC_DROP_FILTER_CNT		0x00002240
+#define RCVLPC_DMA_WQ_FULL_CNT		0x00002244
+#define RCVLPC_DMA_HIPRIO_WQ_FULL_CNT	0x00002248
+#define RCVLPC_NO_RCV_BD_CNT		0x0000224c
+#define RCVLPC_IN_DISCARDS_CNT		0x00002250
+#define RCVLPC_IN_ERRORS_CNT		0x00002254
+#define RCVLPC_RCV_THRESH_HIT_CNT	0x00002258
+/* 0x225c --> 0x2400 unused */
+
+/* Receive Data and Receive BD Initiator Control */
+#define RCVDBDI_MODE			0x00002400
+#define  RCVDBDI_MODE_RESET		 0x00000001
+#define  RCVDBDI_MODE_ENABLE		 0x00000002
+#define  RCVDBDI_MODE_JUMBOBD_NEEDED	 0x00000004
+#define  RCVDBDI_MODE_FRM_TOO_BIG	 0x00000008
+#define  RCVDBDI_MODE_INV_RING_SZ	 0x00000010
+#define  RCVDBDI_MODE_LRG_RING_SZ	 0x00010000
+#define RCVDBDI_STATUS			0x00002404
+#define  RCVDBDI_STATUS_JUMBOBD_NEEDED	 0x00000004
+#define  RCVDBDI_STATUS_FRM_TOO_BIG	 0x00000008
+#define  RCVDBDI_STATUS_INV_RING_SZ	 0x00000010
+#define RCVDBDI_SPLIT_FRAME_MINSZ	0x00002408
+/* 0x240c --> 0x2440 unused */
+#define RCVDBDI_JUMBO_BD		0x00002440 /* TG3_BDINFO_... */
+#define RCVDBDI_STD_BD			0x00002450 /* TG3_BDINFO_... */
+#define RCVDBDI_MINI_BD			0x00002460 /* TG3_BDINFO_... */
+#define RCVDBDI_JUMBO_CON_IDX		0x00002470
+#define RCVDBDI_STD_CON_IDX		0x00002474
+#define RCVDBDI_MINI_CON_IDX		0x00002478
+/* 0x247c --> 0x2480 unused */
+#define RCVDBDI_BD_PROD_IDX_0		0x00002480
+#define RCVDBDI_BD_PROD_IDX_1		0x00002484
+#define RCVDBDI_BD_PROD_IDX_2		0x00002488
+#define RCVDBDI_BD_PROD_IDX_3		0x0000248c
+#define RCVDBDI_BD_PROD_IDX_4		0x00002490
+#define RCVDBDI_BD_PROD_IDX_5		0x00002494
+#define RCVDBDI_BD_PROD_IDX_6		0x00002498
+#define RCVDBDI_BD_PROD_IDX_7		0x0000249c
+#define RCVDBDI_BD_PROD_IDX_8		0x000024a0
+#define RCVDBDI_BD_PROD_IDX_9		0x000024a4
+#define RCVDBDI_BD_PROD_IDX_10		0x000024a8
+#define RCVDBDI_BD_PROD_IDX_11		0x000024ac
+#define RCVDBDI_BD_PROD_IDX_12		0x000024b0
+#define RCVDBDI_BD_PROD_IDX_13		0x000024b4
+#define RCVDBDI_BD_PROD_IDX_14		0x000024b8
+#define RCVDBDI_BD_PROD_IDX_15		0x000024bc
+#define RCVDBDI_HWDIAG			0x000024c0
+/* 0x24c4 --> 0x2800 unused */
+
+/* Receive Data Completion Control */
+#define RCVDCC_MODE			0x00002800
+#define  RCVDCC_MODE_RESET		 0x00000001
+#define  RCVDCC_MODE_ENABLE		 0x00000002
+#define  RCVDCC_MODE_ATTN_ENABLE	 0x00000004
+/* 0x2804 --> 0x2c00 unused */
+
+/* Receive BD Initiator Control Registers */
+#define RCVBDI_MODE			0x00002c00
+#define  RCVBDI_MODE_RESET		 0x00000001
+#define  RCVBDI_MODE_ENABLE		 0x00000002
+#define  RCVBDI_MODE_RCB_ATTN_ENAB	 0x00000004
+#define RCVBDI_STATUS			0x00002c04
+#define  RCVBDI_STATUS_RCB_ATTN		 0x00000004
+#define RCVBDI_JUMBO_PROD_IDX		0x00002c08
+#define RCVBDI_STD_PROD_IDX		0x00002c0c
+#define RCVBDI_MINI_PROD_IDX		0x00002c10
+#define RCVBDI_MINI_THRESH		0x00002c14
+#define RCVBDI_STD_THRESH		0x00002c18
+#define RCVBDI_JUMBO_THRESH		0x00002c1c
+/* 0x2c20 --> 0x2d00 unused */
+
+#define STD_REPLENISH_LWM		0x00002d00
+#define JMB_REPLENISH_LWM		0x00002d04
+/* 0x2d08 --> 0x3000 unused */
+
+/* Receive BD Completion Control Registers */
+#define RCVCC_MODE			0x00003000
+#define  RCVCC_MODE_RESET		 0x00000001
+#define  RCVCC_MODE_ENABLE		 0x00000002
+#define  RCVCC_MODE_ATTN_ENABLE		 0x00000004
+#define RCVCC_STATUS			0x00003004
+#define  RCVCC_STATUS_ERROR_ATTN	 0x00000004
+#define RCVCC_JUMP_PROD_IDX		0x00003008
+#define RCVCC_STD_PROD_IDX		0x0000300c
+#define RCVCC_MINI_PROD_IDX		0x00003010
+/* 0x3014 --> 0x3400 unused */
+
+/* Receive list selector control registers */
+#define RCVLSC_MODE			0x00003400
+#define  RCVLSC_MODE_RESET		 0x00000001
+#define  RCVLSC_MODE_ENABLE		 0x00000002
+#define  RCVLSC_MODE_ATTN_ENABLE	 0x00000004
+#define RCVLSC_STATUS			0x00003404
+#define  RCVLSC_STATUS_ERROR_ATTN	 0x00000004
+/* 0x3408 --> 0x344c unused */
+
+#define TG3_CPMU_DRV_STATUS		0x0000344c
+
+/* CPMU registers */
+#define TG3_CPMU_CTRL			0x00003600
+#define  CPMU_CTRL_LINK_IDLE_MODE	 0x00000200
+#define  CPMU_CTRL_LINK_AWARE_MODE	 0x00000400
+#define  CPMU_CTRL_LINK_SPEED_MODE	 0x00004000
+#define  CPMU_CTRL_GPHY_10MB_RXONLY	 0x00010000
+#define TG3_CPMU_LSPD_10MB_CLK		0x00003604
+#define  CPMU_LSPD_10MB_MACCLK_MASK	 0x001f0000
+#define  CPMU_LSPD_10MB_MACCLK_6_25	 0x00130000
+/* 0x3608 --> 0x360c unused */
+
+#define TG3_CPMU_LSPD_1000MB_CLK	0x0000360c
+#define  CPMU_LSPD_1000MB_MACCLK_62_5	 0x00000000
+#define  CPMU_LSPD_1000MB_MACCLK_12_5	 0x00110000
+#define  CPMU_LSPD_1000MB_MACCLK_MASK	 0x001f0000
+#define TG3_CPMU_LNK_AWARE_PWRMD	0x00003610
+#define  CPMU_LNK_AWARE_MACCLK_MASK	 0x001f0000
+#define  CPMU_LNK_AWARE_MACCLK_6_25	 0x00130000
+/* 0x3614 --> 0x361c unused */
+
+#define TG3_CPMU_HST_ACC		0x0000361c
+#define  CPMU_HST_ACC_MACCLK_MASK	 0x001f0000
+#define  CPMU_HST_ACC_MACCLK_6_25	 0x00130000
+/* 0x3620 --> 0x3624 unused */
+
+#define TG3_CPMU_CLCK_ORIDE		0x00003624
+#define  CPMU_CLCK_ORIDE_MAC_ORIDE_EN	 0x80000000
+
+#define TG3_CPMU_CLCK_STAT		0x00003630
+#define  CPMU_CLCK_STAT_MAC_CLCK_MASK	 0x001f0000
+#define  CPMU_CLCK_STAT_MAC_CLCK_62_5	 0x00000000
+#define  CPMU_CLCK_STAT_MAC_CLCK_12_5	 0x00110000
+#define  CPMU_CLCK_STAT_MAC_CLCK_6_25	 0x00130000
+/* 0x3634 --> 0x365c unused */
+
+#define TG3_CPMU_MUTEX_REQ		0x0000365c
+#define  CPMU_MUTEX_REQ_DRIVER		 0x00001000
+#define TG3_CPMU_MUTEX_GNT		0x00003660
+#define  CPMU_MUTEX_GNT_DRIVER		 0x00001000
+#define TG3_CPMU_PHY_STRAP		0x00003664
+#define TG3_CPMU_PHY_STRAP_IS_SERDES	 0x00000020
+/* 0x3668 --> 0x36b0 unused */
+
+#define TG3_CPMU_EEE_MODE		0x000036b0
+#define  TG3_CPMU_EEEMD_APE_TX_DET_EN	 0x00000004
+#define  TG3_CPMU_EEEMD_ERLY_L1_XIT_DET	 0x00000008
+#define  TG3_CPMU_EEEMD_SND_IDX_DET_EN	 0x00000040
+#define  TG3_CPMU_EEEMD_LPI_ENABLE	 0x00000080
+#define  TG3_CPMU_EEEMD_LPI_IN_TX	 0x00000100
+#define  TG3_CPMU_EEEMD_LPI_IN_RX	 0x00000200
+#define  TG3_CPMU_EEEMD_EEE_ENABLE	 0x00100000
+#define TG3_CPMU_EEE_DBTMR1		0x000036b4
+#define  TG3_CPMU_DBTMR1_PCIEXIT_2047US	 0x07ff0000
+#define  TG3_CPMU_DBTMR1_LNKIDLE_2047US	 0x000007ff
+#define TG3_CPMU_EEE_DBTMR2		0x000036b8
+#define  TG3_CPMU_DBTMR2_APE_TX_2047US	 0x07ff0000
+#define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US	 0x000007ff
+#define TG3_CPMU_EEE_LNKIDL_CTRL	0x000036bc
+#define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0	 0x01000000
+#define  TG3_CPMU_EEE_LNKIDL_UART_IDL	 0x00000004
+/* 0x36c0 --> 0x36d0 unused */
+
+#define TG3_CPMU_EEE_CTRL		0x000036d0
+#define TG3_CPMU_EEE_CTRL_EXIT_16_5_US	 0x0000019d
+#define TG3_CPMU_EEE_CTRL_EXIT_36_US	 0x00000384
+#define TG3_CPMU_EEE_CTRL_EXIT_20_1_US	 0x000001f8
+/* 0x36d4 --> 0x3800 unused */
+
+/* Mbuf cluster free registers */
+#define MBFREE_MODE			0x00003800
+#define  MBFREE_MODE_RESET		 0x00000001
+#define  MBFREE_MODE_ENABLE		 0x00000002
+#define MBFREE_STATUS			0x00003804
+/* 0x3808 --> 0x3c00 unused */
+
+/* Host coalescing control registers */
+#define HOSTCC_MODE			0x00003c00
+#define  HOSTCC_MODE_RESET		 0x00000001
+#define  HOSTCC_MODE_ENABLE		 0x00000002
+#define  HOSTCC_MODE_ATTN		 0x00000004
+#define  HOSTCC_MODE_NOW		 0x00000008
+#define  HOSTCC_MODE_FULL_STATUS	 0x00000000
+#define  HOSTCC_MODE_64BYTE		 0x00000080
+#define  HOSTCC_MODE_32BYTE		 0x00000100
+#define  HOSTCC_MODE_CLRTICK_RXBD	 0x00000200
+#define  HOSTCC_MODE_CLRTICK_TXBD	 0x00000400
+#define  HOSTCC_MODE_NOINT_ON_NOW	 0x00000800
+#define  HOSTCC_MODE_NOINT_ON_FORCE	 0x00001000
+#define  HOSTCC_MODE_COAL_VEC1_NOW	 0x00002000
+#define HOSTCC_STATUS			0x00003c04
+#define  HOSTCC_STATUS_ERROR_ATTN	 0x00000004
+#define HOSTCC_RXCOL_TICKS		0x00003c08
+#define  LOW_RXCOL_TICKS		 0x00000032
+#define  LOW_RXCOL_TICKS_CLRTCKS	 0x00000014
+#define  DEFAULT_RXCOL_TICKS		 0x00000048
+#define  HIGH_RXCOL_TICKS		 0x00000096
+#define  MAX_RXCOL_TICKS		 0x000003ff
+#define HOSTCC_TXCOL_TICKS		0x00003c0c
+#define  LOW_TXCOL_TICKS		 0x00000096
+#define  LOW_TXCOL_TICKS_CLRTCKS	 0x00000048
+#define  DEFAULT_TXCOL_TICKS		 0x0000012c
+#define  HIGH_TXCOL_TICKS		 0x00000145
+#define  MAX_TXCOL_TICKS		 0x000003ff
+#define HOSTCC_RXMAX_FRAMES		0x00003c10
+#define  LOW_RXMAX_FRAMES		 0x00000005
+#define  DEFAULT_RXMAX_FRAMES		 0x00000008
+#define  HIGH_RXMAX_FRAMES		 0x00000012
+#define  MAX_RXMAX_FRAMES		 0x000000ff
+#define HOSTCC_TXMAX_FRAMES		0x00003c14
+#define  LOW_TXMAX_FRAMES		 0x00000035
+#define  DEFAULT_TXMAX_FRAMES		 0x0000004b
+#define  HIGH_TXMAX_FRAMES		 0x00000052
+#define  MAX_TXMAX_FRAMES		 0x000000ff
+#define HOSTCC_RXCOAL_TICK_INT		0x00003c18
+#define  DEFAULT_RXCOAL_TICK_INT	 0x00000019
+#define  DEFAULT_RXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define  MAX_RXCOAL_TICK_INT		 0x000003ff
+#define HOSTCC_TXCOAL_TICK_INT		0x00003c1c
+#define  DEFAULT_TXCOAL_TICK_INT	 0x00000019
+#define  DEFAULT_TXCOAL_TICK_INT_CLRTCKS 0x00000014
+#define  MAX_TXCOAL_TICK_INT		 0x000003ff
+#define HOSTCC_RXCOAL_MAXF_INT		0x00003c20
+#define  DEFAULT_RXCOAL_MAXF_INT	 0x00000005
+#define  MAX_RXCOAL_MAXF_INT		 0x000000ff
+#define HOSTCC_TXCOAL_MAXF_INT		0x00003c24
+#define  DEFAULT_TXCOAL_MAXF_INT	 0x00000005
+#define  MAX_TXCOAL_MAXF_INT		 0x000000ff
+#define HOSTCC_STAT_COAL_TICKS		0x00003c28
+#define  DEFAULT_STAT_COAL_TICKS	 0x000f4240
+#define  MAX_STAT_COAL_TICKS		 0xd693d400
+#define  MIN_STAT_COAL_TICKS		 0x00000064
+/* 0x3c2c --> 0x3c30 unused */
+#define HOSTCC_STATS_BLK_HOST_ADDR	0x00003c30 /* 64-bit */
+#define HOSTCC_STATUS_BLK_HOST_ADDR	0x00003c38 /* 64-bit */
+#define HOSTCC_STATS_BLK_NIC_ADDR	0x00003c40
+#define HOSTCC_STATUS_BLK_NIC_ADDR	0x00003c44
+#define HOSTCC_FLOW_ATTN		0x00003c48
+#define HOSTCC_FLOW_ATTN_MBUF_LWM	 0x00000040
+/* 0x3c4c --> 0x3c50 unused */
+#define HOSTCC_JUMBO_CON_IDX		0x00003c50
+#define HOSTCC_STD_CON_IDX		0x00003c54
+#define HOSTCC_MINI_CON_IDX		0x00003c58
+/* 0x3c5c --> 0x3c80 unused */
+#define HOSTCC_RET_PROD_IDX_0		0x00003c80
+#define HOSTCC_RET_PROD_IDX_1		0x00003c84
+#define HOSTCC_RET_PROD_IDX_2		0x00003c88
+#define HOSTCC_RET_PROD_IDX_3		0x00003c8c
+#define HOSTCC_RET_PROD_IDX_4		0x00003c90
+#define HOSTCC_RET_PROD_IDX_5		0x00003c94
+#define HOSTCC_RET_PROD_IDX_6		0x00003c98
+#define HOSTCC_RET_PROD_IDX_7		0x00003c9c
+#define HOSTCC_RET_PROD_IDX_8		0x00003ca0
+#define HOSTCC_RET_PROD_IDX_9		0x00003ca4
+#define HOSTCC_RET_PROD_IDX_10		0x00003ca8
+#define HOSTCC_RET_PROD_IDX_11		0x00003cac
+#define HOSTCC_RET_PROD_IDX_12		0x00003cb0
+#define HOSTCC_RET_PROD_IDX_13		0x00003cb4
+#define HOSTCC_RET_PROD_IDX_14		0x00003cb8
+#define HOSTCC_RET_PROD_IDX_15		0x00003cbc
+#define HOSTCC_SND_CON_IDX_0		0x00003cc0
+#define HOSTCC_SND_CON_IDX_1		0x00003cc4
+#define HOSTCC_SND_CON_IDX_2		0x00003cc8
+#define HOSTCC_SND_CON_IDX_3		0x00003ccc
+#define HOSTCC_SND_CON_IDX_4		0x00003cd0
+#define HOSTCC_SND_CON_IDX_5		0x00003cd4
+#define HOSTCC_SND_CON_IDX_6		0x00003cd8
+#define HOSTCC_SND_CON_IDX_7		0x00003cdc
+#define HOSTCC_SND_CON_IDX_8		0x00003ce0
+#define HOSTCC_SND_CON_IDX_9		0x00003ce4
+#define HOSTCC_SND_CON_IDX_10		0x00003ce8
+#define HOSTCC_SND_CON_IDX_11		0x00003cec
+#define HOSTCC_SND_CON_IDX_12		0x00003cf0
+#define HOSTCC_SND_CON_IDX_13		0x00003cf4
+#define HOSTCC_SND_CON_IDX_14		0x00003cf8
+#define HOSTCC_SND_CON_IDX_15		0x00003cfc
+#define HOSTCC_STATBLCK_RING1		0x00003d00
+/* 0x3d04 --> 0x3d80 unused */
+
+#define HOSTCC_RXCOL_TICKS_VEC1		0x00003d80
+#define HOSTCC_TXCOL_TICKS_VEC1		0x00003d84
+#define HOSTCC_RXMAX_FRAMES_VEC1	0x00003d88
+#define HOSTCC_TXMAX_FRAMES_VEC1	0x00003d8c
+#define HOSTCC_RXCOAL_MAXF_INT_VEC1	0x00003d90
+#define HOSTCC_TXCOAL_MAXF_INT_VEC1	0x00003d94
+/* 0x3d98 --> 0x4000 unused */
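+
+/*
+ * Editorial sketch, not part of the original file: programming the default
+ * host coalescing parameters defined above.  Assumes the driver's tw32()
+ * accessor; real code derives these values from the user's ethtool -C
+ * settings.
+ */
+static inline void tg3_example_default_coal(struct tg3 *tp)
+{
+	tw32(HOSTCC_RXCOL_TICKS, DEFAULT_RXCOL_TICKS);
+	tw32(HOSTCC_TXCOL_TICKS, DEFAULT_TXCOL_TICKS);
+	tw32(HOSTCC_RXMAX_FRAMES, DEFAULT_RXMAX_FRAMES);
+	tw32(HOSTCC_TXMAX_FRAMES, DEFAULT_TXMAX_FRAMES);
+	tw32(HOSTCC_STAT_COAL_TICKS, DEFAULT_STAT_COAL_TICKS);
+}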
+
+/* Memory arbiter control registers */
+#define MEMARB_MODE			0x00004000
+#define  MEMARB_MODE_RESET		 0x00000001
+#define  MEMARB_MODE_ENABLE		 0x00000002
+#define MEMARB_STATUS			0x00004004
+#define MEMARB_TRAP_ADDR_LOW		0x00004008
+#define MEMARB_TRAP_ADDR_HIGH		0x0000400c
+/* 0x4010 --> 0x4400 unused */
+
+/* Buffer manager control registers */
+#define BUFMGR_MODE			0x00004400
+#define  BUFMGR_MODE_RESET		 0x00000001
+#define  BUFMGR_MODE_ENABLE		 0x00000002
+#define  BUFMGR_MODE_ATTN_ENABLE	 0x00000004
+#define  BUFMGR_MODE_BM_TEST		 0x00000008
+#define  BUFMGR_MODE_MBLOW_ATTN_ENAB	 0x00000010
+#define  BUFMGR_MODE_NO_TX_UNDERRUN	 0x80000000
+#define BUFMGR_STATUS			0x00004404
+#define  BUFMGR_STATUS_ERROR		 0x00000004
+#define  BUFMGR_STATUS_MBLOW		 0x00000010
+#define BUFMGR_MB_POOL_ADDR		0x00004408
+#define BUFMGR_MB_POOL_SIZE		0x0000440c
+#define BUFMGR_MB_RDMA_LOW_WATER	0x00004410
+#define  DEFAULT_MB_RDMA_LOW_WATER	 0x00000050
+#define  DEFAULT_MB_RDMA_LOW_WATER_5705	 0x00000000
+#define  DEFAULT_MB_RDMA_LOW_WATER_JUMBO 0x00000130
+#define  DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780 0x00000000
+#define BUFMGR_MB_MACRX_LOW_WATER	0x00004414
+#define  DEFAULT_MB_MACRX_LOW_WATER	  0x00000020
+#define  DEFAULT_MB_MACRX_LOW_WATER_5705  0x00000010
+#define  DEFAULT_MB_MACRX_LOW_WATER_5906  0x00000004
+#define  DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
+#define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
+#define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define  DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
+#define BUFMGR_MB_HIGH_WATER		0x00004418
+#define  DEFAULT_MB_HIGH_WATER		 0x00000060
+#define  DEFAULT_MB_HIGH_WATER_5705	 0x00000060
+#define  DEFAULT_MB_HIGH_WATER_5906	 0x00000010
+#define  DEFAULT_MB_HIGH_WATER_57765	 0x000000a0
+#define  DEFAULT_MB_HIGH_WATER_JUMBO	 0x0000017c
+#define  DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define  DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
+#define BUFMGR_RX_MB_ALLOC_REQ		0x0000441c
+#define  BUFMGR_MB_ALLOC_BIT		 0x10000000
+#define BUFMGR_RX_MB_ALLOC_RESP		0x00004420
+#define BUFMGR_TX_MB_ALLOC_REQ		0x00004424
+#define BUFMGR_TX_MB_ALLOC_RESP		0x00004428
+#define BUFMGR_DMA_DESC_POOL_ADDR	0x0000442c
+#define BUFMGR_DMA_DESC_POOL_SIZE	0x00004430
+#define BUFMGR_DMA_LOW_WATER		0x00004434
+#define  DEFAULT_DMA_LOW_WATER		 0x00000005
+#define BUFMGR_DMA_HIGH_WATER		0x00004438
+#define  DEFAULT_DMA_HIGH_WATER		 0x0000000a
+#define BUFMGR_RX_DMA_ALLOC_REQ		0x0000443c
+#define BUFMGR_RX_DMA_ALLOC_RESP	0x00004440
+#define BUFMGR_TX_DMA_ALLOC_REQ		0x00004444
+#define BUFMGR_TX_DMA_ALLOC_RESP	0x00004448
+#define BUFMGR_HWDIAG_0			0x0000444c
+#define BUFMGR_HWDIAG_1			0x00004450
+#define BUFMGR_HWDIAG_2			0x00004454
+/* 0x4458 --> 0x4800 unused */
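+
+/*
+ * Editorial sketch, not part of the original file: the mbuf watermarks
+ * above come in standard and jumbo flavors; which set applies depends on
+ * the current MTU.  Assumes tw32(); the 'jumbo' flag is hypothetical.
+ */
+static inline void tg3_example_set_mbuf_wmarks(struct tg3 *tp, bool jumbo)
+{
+	if (jumbo) {
+		tw32(BUFMGR_MB_RDMA_LOW_WATER, DEFAULT_MB_RDMA_LOW_WATER_JUMBO);
+		tw32(BUFMGR_MB_MACRX_LOW_WATER, DEFAULT_MB_MACRX_LOW_WATER_JUMBO);
+		tw32(BUFMGR_MB_HIGH_WATER, DEFAULT_MB_HIGH_WATER_JUMBO);
+	} else {
+		tw32(BUFMGR_MB_RDMA_LOW_WATER, DEFAULT_MB_RDMA_LOW_WATER);
+		tw32(BUFMGR_MB_MACRX_LOW_WATER, DEFAULT_MB_MACRX_LOW_WATER);
+		tw32(BUFMGR_MB_HIGH_WATER, DEFAULT_MB_HIGH_WATER);
+	}
+}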
+
+/* Read DMA control registers */
+#define RDMAC_MODE			0x00004800
+#define  RDMAC_MODE_RESET		 0x00000001
+#define  RDMAC_MODE_ENABLE		 0x00000002
+#define  RDMAC_MODE_TGTABORT_ENAB	 0x00000004
+#define  RDMAC_MODE_MSTABORT_ENAB	 0x00000008
+#define  RDMAC_MODE_PARITYERR_ENAB	 0x00000010
+#define  RDMAC_MODE_ADDROFLOW_ENAB	 0x00000020
+#define  RDMAC_MODE_FIFOOFLOW_ENAB	 0x00000040
+#define  RDMAC_MODE_FIFOURUN_ENAB	 0x00000080
+#define  RDMAC_MODE_FIFOOREAD_ENAB	 0x00000100
+#define  RDMAC_MODE_LNGREAD_ENAB	 0x00000200
+#define  RDMAC_MODE_SPLIT_ENABLE	 0x00000800
+#define  RDMAC_MODE_BD_SBD_CRPT_ENAB	 0x00000800
+#define  RDMAC_MODE_SPLIT_RESET		 0x00001000
+#define  RDMAC_MODE_MBUF_RBD_CRPT_ENAB	 0x00001000
+#define  RDMAC_MODE_MBUF_SBD_CRPT_ENAB	 0x00002000
+#define  RDMAC_MODE_FIFO_SIZE_128	 0x00020000
+#define  RDMAC_MODE_FIFO_LONG_BURST	 0x00030000
+#define  RDMAC_MODE_MULT_DMA_RD_DIS	 0x01000000
+#define  RDMAC_MODE_IPV4_LSO_EN		 0x08000000
+#define  RDMAC_MODE_IPV6_LSO_EN		 0x10000000
+#define  RDMAC_MODE_H2BNC_VLAN_DET	 0x20000000
+#define RDMAC_STATUS			0x00004804
+#define  RDMAC_STATUS_TGTABORT		 0x00000004
+#define  RDMAC_STATUS_MSTABORT		 0x00000008
+#define  RDMAC_STATUS_PARITYERR		 0x00000010
+#define  RDMAC_STATUS_ADDROFLOW		 0x00000020
+#define  RDMAC_STATUS_FIFOOFLOW		 0x00000040
+#define  RDMAC_STATUS_FIFOURUN		 0x00000080
+#define  RDMAC_STATUS_FIFOOREAD		 0x00000100
+#define  RDMAC_STATUS_LNGREAD		 0x00000200
+/* 0x4808 --> 0x4900 unused */
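+
+/*
+ * Editorial sketch, not part of the original file: the read-DMA engine is
+ * normally enabled with most of its error attentions armed; chip-specific
+ * bits are ORed in on top of this base.  Assumes tw32().
+ */
+static inline void tg3_example_start_rdma(struct tg3 *tp, u32 extra)
+{
+	u32 mode = RDMAC_MODE_ENABLE |
+		   RDMAC_MODE_TGTABORT_ENAB | RDMAC_MODE_MSTABORT_ENAB |
+		   RDMAC_MODE_PARITYERR_ENAB | RDMAC_MODE_ADDROFLOW_ENAB |
+		   RDMAC_MODE_FIFOOFLOW_ENAB | RDMAC_MODE_FIFOURUN_ENAB |
+		   RDMAC_MODE_FIFOOREAD_ENAB | RDMAC_MODE_LNGREAD_ENAB;
+
+	tw32(RDMAC_MODE, mode | extra);
+}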
+
+#define TG3_RDMA_RSRVCTRL_REG		0x00004900
+#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX	 0x00000004
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K	 0x00000c00
+#define TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK	 0x00000ff0
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K	 0x000c0000
+#define TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK	 0x000ff000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B	 0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK	 0xffe00000
+/* 0x4904 --> 0x4910 unused */
+
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL	0x00004910
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K	 0x00030000
+#define TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K	 0x000c0000
+/* 0x4914 --> 0x4c00 unused */
+
+/* Write DMA control registers */
+#define WDMAC_MODE			0x00004c00
+#define  WDMAC_MODE_RESET		 0x00000001
+#define  WDMAC_MODE_ENABLE		 0x00000002
+#define  WDMAC_MODE_TGTABORT_ENAB	 0x00000004
+#define  WDMAC_MODE_MSTABORT_ENAB	 0x00000008
+#define  WDMAC_MODE_PARITYERR_ENAB	 0x00000010
+#define  WDMAC_MODE_ADDROFLOW_ENAB	 0x00000020
+#define  WDMAC_MODE_FIFOOFLOW_ENAB	 0x00000040
+#define  WDMAC_MODE_FIFOURUN_ENAB	 0x00000080
+#define  WDMAC_MODE_FIFOOREAD_ENAB	 0x00000100
+#define  WDMAC_MODE_LNGREAD_ENAB	 0x00000200
+#define  WDMAC_MODE_RX_ACCEL		 0x00000400
+#define  WDMAC_MODE_STATUS_TAG_FIX	 0x20000000
+#define  WDMAC_MODE_BURST_ALL_DATA	 0xc0000000
+#define WDMAC_STATUS			0x00004c04
+#define  WDMAC_STATUS_TGTABORT		 0x00000004
+#define  WDMAC_STATUS_MSTABORT		 0x00000008
+#define  WDMAC_STATUS_PARITYERR		 0x00000010
+#define  WDMAC_STATUS_ADDROFLOW		 0x00000020
+#define  WDMAC_STATUS_FIFOOFLOW		 0x00000040
+#define  WDMAC_STATUS_FIFOURUN		 0x00000080
+#define  WDMAC_STATUS_FIFOOREAD		 0x00000100
+#define  WDMAC_STATUS_LNGREAD		 0x00000200
+/* 0x4c08 --> 0x5000 unused */
+
+/* Per-cpu register offsets (arm9) */
+#define CPU_MODE			0x00000000
+#define  CPU_MODE_RESET			 0x00000001
+#define  CPU_MODE_HALT			 0x00000400
+#define CPU_STATE			0x00000004
+#define CPU_EVTMASK			0x00000008
+/* 0xc --> 0x1c reserved */
+#define CPU_PC				0x0000001c
+#define CPU_INSN			0x00000020
+#define CPU_SPAD_UFLOW			0x00000024
+#define CPU_WDOG_CLEAR			0x00000028
+#define CPU_WDOG_VECTOR			0x0000002c
+#define CPU_WDOG_PC			0x00000030
+#define CPU_HW_BP			0x00000034
+/* 0x38 --> 0x44 unused */
+#define CPU_WDOG_SAVED_STATE		0x00000044
+#define CPU_LAST_BRANCH_ADDR		0x00000048
+#define CPU_SPAD_UFLOW_SET		0x0000004c
+/* 0x50 --> 0x200 unused */
+#define CPU_R0				0x00000200
+#define CPU_R1				0x00000204
+#define CPU_R2				0x00000208
+#define CPU_R3				0x0000020c
+#define CPU_R4				0x00000210
+#define CPU_R5				0x00000214
+#define CPU_R6				0x00000218
+#define CPU_R7				0x0000021c
+#define CPU_R8				0x00000220
+#define CPU_R9				0x00000224
+#define CPU_R10				0x00000228
+#define CPU_R11				0x0000022c
+#define CPU_R12				0x00000230
+#define CPU_R13				0x00000234
+#define CPU_R14				0x00000238
+#define CPU_R15				0x0000023c
+#define CPU_R16				0x00000240
+#define CPU_R17				0x00000244
+#define CPU_R18				0x00000248
+#define CPU_R19				0x0000024c
+#define CPU_R20				0x00000250
+#define CPU_R21				0x00000254
+#define CPU_R22				0x00000258
+#define CPU_R23				0x0000025c
+#define CPU_R24				0x00000260
+#define CPU_R25				0x00000264
+#define CPU_R26				0x00000268
+#define CPU_R27				0x0000026c
+#define CPU_R28				0x00000270
+#define CPU_R29				0x00000274
+#define CPU_R30				0x00000278
+#define CPU_R31				0x0000027c
+/* 0x280 --> 0x400 unused */
+
+#define RX_CPU_BASE			0x00005000
+#define RX_CPU_MODE			0x00005000
+#define RX_CPU_STATE			0x00005004
+#define RX_CPU_PGMCTR			0x0000501c
+#define RX_CPU_HWBKPT			0x00005034
+#define TX_CPU_BASE			0x00005400
+#define TX_CPU_MODE			0x00005400
+#define TX_CPU_STATE			0x00005404
+#define TX_CPU_PGMCTR			0x0000541c
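+
+/*
+ * Editorial sketch, not part of the original file: the per-CPU offsets
+ * above are relative to RX_CPU_BASE or TX_CPU_BASE, so halting either
+ * embedded CPU is the same sequence.  Assumes tw32()/tr32(), udelay()
+ * and the kernel's errno values.
+ */
+static inline int tg3_example_halt_cpu(struct tg3 *tp, u32 cpu_base)
+{
+	int i;
+
+	tw32(cpu_base + CPU_STATE, 0xffffffff);
+	tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
+	for (i = 0; i < 10000; i++) {
+		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
+			return 0;
+		udelay(10);
+	}
+	return -EBUSY;
+}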
+
+#define VCPU_STATUS			0x00005100
+#define  VCPU_STATUS_INIT_DONE		 0x04000000
+#define  VCPU_STATUS_DRV_RESET		 0x08000000
+
+#define VCPU_CFGSHDW			0x00005104
+#define  VCPU_CFGSHDW_WOL_ENABLE	 0x00000001
+#define  VCPU_CFGSHDW_WOL_MAGPKT	 0x00000004
+#define  VCPU_CFGSHDW_ASPM_DBNC		 0x00001000
+
+/* Mailboxes */
+#define GRCMBOX_BASE			0x00005600
+#define GRCMBOX_INTERRUPT_0		0x00005800 /* 64-bit */
+#define GRCMBOX_INTERRUPT_1		0x00005808 /* 64-bit */
+#define GRCMBOX_INTERRUPT_2		0x00005810 /* 64-bit */
+#define GRCMBOX_INTERRUPT_3		0x00005818 /* 64-bit */
+#define GRCMBOX_GENERAL_0		0x00005820 /* 64-bit */
+#define GRCMBOX_GENERAL_1		0x00005828 /* 64-bit */
+#define GRCMBOX_GENERAL_2		0x00005830 /* 64-bit */
+#define GRCMBOX_GENERAL_3		0x00005838 /* 64-bit */
+#define GRCMBOX_GENERAL_4		0x00005840 /* 64-bit */
+#define GRCMBOX_GENERAL_5		0x00005848 /* 64-bit */
+#define GRCMBOX_GENERAL_6		0x00005850 /* 64-bit */
+#define GRCMBOX_GENERAL_7		0x00005858 /* 64-bit */
+#define GRCMBOX_RELOAD_STAT		0x00005860 /* 64-bit */
+#define GRCMBOX_RCVSTD_PROD_IDX		0x00005868 /* 64-bit */
+#define GRCMBOX_RCVJUMBO_PROD_IDX	0x00005870 /* 64-bit */
+#define GRCMBOX_RCVMINI_PROD_IDX	0x00005878 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_0	0x00005880 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_1	0x00005888 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_2	0x00005890 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_3	0x00005898 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_4	0x000058a0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_5	0x000058a8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_6	0x000058b0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_7	0x000058b8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_8	0x000058c0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_9	0x000058c8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_10	0x000058d0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_11	0x000058d8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_12	0x000058e0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_13	0x000058e8 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_14	0x000058f0 /* 64-bit */
+#define GRCMBOX_RCVRET_CON_IDX_15	0x000058f8 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_0	0x00005900 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_1	0x00005908 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_2	0x00005910 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_3	0x00005918 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_4	0x00005920 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_5	0x00005928 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_6	0x00005930 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_7	0x00005938 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_8	0x00005940 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_9	0x00005948 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_10	0x00005950 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_11	0x00005958 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_12	0x00005960 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_13	0x00005968 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_14	0x00005970 /* 64-bit */
+#define GRCMBOX_SNDHOST_PROD_IDX_15	0x00005978 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_0	0x00005980 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_1	0x00005988 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_2	0x00005990 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_3	0x00005998 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_4	0x000059a0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_5	0x000059a8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_6	0x000059b0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_7	0x000059b8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_8	0x000059c0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_9	0x000059c8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_10	0x000059d0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_11	0x000059d8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_12	0x000059e0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_13	0x000059e8 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_14	0x000059f0 /* 64-bit */
+#define GRCMBOX_SNDNIC_PROD_IDX_15	0x000059f8 /* 64-bit */
+#define GRCMBOX_HIGH_PRIO_EV_VECTOR	0x00005a00
+#define GRCMBOX_HIGH_PRIO_EV_MASK	0x00005a04
+#define GRCMBOX_LOW_PRIO_EV_VEC		0x00005a08
+#define GRCMBOX_LOW_PRIO_EV_MASK	0x00005a0c
+/* 0x5a10 --> 0x5c00 unused */
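+
+/*
+ * Editorial sketch, not part of the original file: the mailboxes above are
+ * 64-bit, and producer indices are posted through the low word.  Assumes
+ * TG3_64BIT_REG_LOW and the tw32_mailbox_f() helper, both defined
+ * elsewhere in the driver.
+ */
+static inline void tg3_example_post_std_prod(struct tg3 *tp, u32 idx)
+{
+	tw32_mailbox_f(GRCMBOX_RCVSTD_PROD_IDX + TG3_64BIT_REG_LOW, idx);
+}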
+
+/* Flow Through queues */
+#define FTQ_RESET			0x00005c00
+/* 0x5c04 --> 0x5c10 unused */
+#define FTQ_DMA_NORM_READ_CTL		0x00005c10
+#define FTQ_DMA_NORM_READ_FULL_CNT	0x00005c14
+#define FTQ_DMA_NORM_READ_FIFO_ENQDEQ	0x00005c18
+#define FTQ_DMA_NORM_READ_WRITE_PEEK	0x00005c1c
+#define FTQ_DMA_HIGH_READ_CTL		0x00005c20
+#define FTQ_DMA_HIGH_READ_FULL_CNT	0x00005c24
+#define FTQ_DMA_HIGH_READ_FIFO_ENQDEQ	0x00005c28
+#define FTQ_DMA_HIGH_READ_WRITE_PEEK	0x00005c2c
+#define FTQ_DMA_COMP_DISC_CTL		0x00005c30
+#define FTQ_DMA_COMP_DISC_FULL_CNT	0x00005c34
+#define FTQ_DMA_COMP_DISC_FIFO_ENQDEQ	0x00005c38
+#define FTQ_DMA_COMP_DISC_WRITE_PEEK	0x00005c3c
+#define FTQ_SEND_BD_COMP_CTL		0x00005c40
+#define FTQ_SEND_BD_COMP_FULL_CNT	0x00005c44
+#define FTQ_SEND_BD_COMP_FIFO_ENQDEQ	0x00005c48
+#define FTQ_SEND_BD_COMP_WRITE_PEEK	0x00005c4c
+#define FTQ_SEND_DATA_INIT_CTL		0x00005c50
+#define FTQ_SEND_DATA_INIT_FULL_CNT	0x00005c54
+#define FTQ_SEND_DATA_INIT_FIFO_ENQDEQ	0x00005c58
+#define FTQ_SEND_DATA_INIT_WRITE_PEEK	0x00005c5c
+#define FTQ_DMA_NORM_WRITE_CTL		0x00005c60
+#define FTQ_DMA_NORM_WRITE_FULL_CNT	0x00005c64
+#define FTQ_DMA_NORM_WRITE_FIFO_ENQDEQ	0x00005c68
+#define FTQ_DMA_NORM_WRITE_WRITE_PEEK	0x00005c6c
+#define FTQ_DMA_HIGH_WRITE_CTL		0x00005c70
+#define FTQ_DMA_HIGH_WRITE_FULL_CNT	0x00005c74
+#define FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ	0x00005c78
+#define FTQ_DMA_HIGH_WRITE_WRITE_PEEK	0x00005c7c
+#define FTQ_SWTYPE1_CTL			0x00005c80
+#define FTQ_SWTYPE1_FULL_CNT		0x00005c84
+#define FTQ_SWTYPE1_FIFO_ENQDEQ		0x00005c88
+#define FTQ_SWTYPE1_WRITE_PEEK		0x00005c8c
+#define FTQ_SEND_DATA_COMP_CTL		0x00005c90
+#define FTQ_SEND_DATA_COMP_FULL_CNT	0x00005c94
+#define FTQ_SEND_DATA_COMP_FIFO_ENQDEQ	0x00005c98
+#define FTQ_SEND_DATA_COMP_WRITE_PEEK	0x00005c9c
+#define FTQ_HOST_COAL_CTL		0x00005ca0
+#define FTQ_HOST_COAL_FULL_CNT		0x00005ca4
+#define FTQ_HOST_COAL_FIFO_ENQDEQ	0x00005ca8
+#define FTQ_HOST_COAL_WRITE_PEEK	0x00005cac
+#define FTQ_MAC_TX_CTL			0x00005cb0
+#define FTQ_MAC_TX_FULL_CNT		0x00005cb4
+#define FTQ_MAC_TX_FIFO_ENQDEQ		0x00005cb8
+#define FTQ_MAC_TX_WRITE_PEEK		0x00005cbc
+#define FTQ_MB_FREE_CTL			0x00005cc0
+#define FTQ_MB_FREE_FULL_CNT		0x00005cc4
+#define FTQ_MB_FREE_FIFO_ENQDEQ		0x00005cc8
+#define FTQ_MB_FREE_WRITE_PEEK		0x00005ccc
+#define FTQ_RCVBD_COMP_CTL		0x00005cd0
+#define FTQ_RCVBD_COMP_FULL_CNT		0x00005cd4
+#define FTQ_RCVBD_COMP_FIFO_ENQDEQ	0x00005cd8
+#define FTQ_RCVBD_COMP_WRITE_PEEK	0x00005cdc
+#define FTQ_RCVLST_PLMT_CTL		0x00005ce0
+#define FTQ_RCVLST_PLMT_FULL_CNT	0x00005ce4
+#define FTQ_RCVLST_PLMT_FIFO_ENQDEQ	0x00005ce8
+#define FTQ_RCVLST_PLMT_WRITE_PEEK	0x00005cec
+#define FTQ_RCVDATA_INI_CTL		0x00005cf0
+#define FTQ_RCVDATA_INI_FULL_CNT	0x00005cf4
+#define FTQ_RCVDATA_INI_FIFO_ENQDEQ	0x00005cf8
+#define FTQ_RCVDATA_INI_WRITE_PEEK	0x00005cfc
+#define FTQ_RCVDATA_COMP_CTL		0x00005d00
+#define FTQ_RCVDATA_COMP_FULL_CNT	0x00005d04
+#define FTQ_RCVDATA_COMP_FIFO_ENQDEQ	0x00005d08
+#define FTQ_RCVDATA_COMP_WRITE_PEEK	0x00005d0c
+#define FTQ_SWTYPE2_CTL			0x00005d10
+#define FTQ_SWTYPE2_FULL_CNT		0x00005d14
+#define FTQ_SWTYPE2_FIFO_ENQDEQ		0x00005d18
+#define FTQ_SWTYPE2_WRITE_PEEK		0x00005d1c
+/* 0x5d20 --> 0x6000 unused */
+
+/* Message signaled interrupt registers */
+#define MSGINT_MODE			0x00006000
+#define  MSGINT_MODE_RESET		 0x00000001
+#define  MSGINT_MODE_ENABLE		 0x00000002
+#define  MSGINT_MODE_ONE_SHOT_DISABLE	 0x00000020
+#define  MSGINT_MODE_MULTIVEC_EN	 0x00000080
+#define MSGINT_STATUS			0x00006004
+#define  MSGINT_STATUS_MSI_REQ		 0x00000001
+#define MSGINT_FIFO			0x00006008
+/* 0x600c --> 0x6400 unused */
+
+/* DMA completion registers */
+#define DMAC_MODE			0x00006400
+#define  DMAC_MODE_RESET		 0x00000001
+#define  DMAC_MODE_ENABLE		 0x00000002
+/* 0x6404 --> 0x6800 unused */
+
+/* GRC registers */
+#define GRC_MODE			0x00006800
+#define  GRC_MODE_UPD_ON_COAL		0x00000001
+#define  GRC_MODE_BSWAP_NONFRM_DATA	0x00000002
+#define  GRC_MODE_WSWAP_NONFRM_DATA	0x00000004
+#define  GRC_MODE_BSWAP_DATA		0x00000010
+#define  GRC_MODE_WSWAP_DATA		0x00000020
+#define  GRC_MODE_BYTE_SWAP_B2HRX_DATA	0x00000040
+#define  GRC_MODE_WORD_SWAP_B2HRX_DATA	0x00000080
+#define  GRC_MODE_SPLITHDR		0x00000100
+#define  GRC_MODE_NOFRM_CRACKING	0x00000200
+#define  GRC_MODE_INCL_CRC		0x00000400
+#define  GRC_MODE_ALLOW_BAD_FRMS	0x00000800
+#define  GRC_MODE_NOIRQ_ON_SENDS	0x00002000
+#define  GRC_MODE_NOIRQ_ON_RCV		0x00004000
+#define  GRC_MODE_FORCE_PCI32BIT	0x00008000
+#define  GRC_MODE_B2HRX_ENABLE		0x00008000
+#define  GRC_MODE_HOST_STACKUP		0x00010000
+#define  GRC_MODE_HOST_SENDBDS		0x00020000
+#define  GRC_MODE_HTX2B_ENABLE		0x00040000
+#define  GRC_MODE_NO_TX_PHDR_CSUM	0x00100000
+#define  GRC_MODE_NVRAM_WR_ENABLE	0x00200000
+#define  GRC_MODE_PCIE_TL_SEL		0x00000000
+#define  GRC_MODE_PCIE_PL_SEL		0x00400000
+#define  GRC_MODE_NO_RX_PHDR_CSUM	0x00800000
+#define  GRC_MODE_IRQ_ON_TX_CPU_ATTN	0x01000000
+#define  GRC_MODE_IRQ_ON_RX_CPU_ATTN	0x02000000
+#define  GRC_MODE_IRQ_ON_MAC_ATTN	0x04000000
+#define  GRC_MODE_IRQ_ON_DMA_ATTN	0x08000000
+#define  GRC_MODE_IRQ_ON_FLOW_ATTN	0x10000000
+#define  GRC_MODE_4X_NIC_SEND_RINGS	0x20000000
+#define  GRC_MODE_PCIE_DL_SEL		0x20000000
+#define  GRC_MODE_MCAST_FRM_ENABLE	0x40000000
+#define  GRC_MODE_PCIE_HI_1K_EN		0x80000000
+#define  GRC_MODE_PCIE_PORT_MASK	(GRC_MODE_PCIE_TL_SEL | \
+					 GRC_MODE_PCIE_PL_SEL | \
+					 GRC_MODE_PCIE_DL_SEL | \
+					 GRC_MODE_PCIE_HI_1K_EN)
+#define GRC_MISC_CFG			0x00006804
+#define  GRC_MISC_CFG_CORECLK_RESET	0x00000001
+#define  GRC_MISC_CFG_PRESCALAR_MASK	0x000000fe
+#define  GRC_MISC_CFG_PRESCALAR_SHIFT	1
+#define  GRC_MISC_CFG_BOARD_ID_MASK	0x0001e000
+#define  GRC_MISC_CFG_BOARD_ID_5700	0x0001e000
+#define  GRC_MISC_CFG_BOARD_ID_5701	0x00000000
+#define  GRC_MISC_CFG_BOARD_ID_5702FE	0x00004000
+#define  GRC_MISC_CFG_BOARD_ID_5703	0x00000000
+#define  GRC_MISC_CFG_BOARD_ID_5703S	0x00002000
+#define  GRC_MISC_CFG_BOARD_ID_5704	0x00000000
+#define  GRC_MISC_CFG_BOARD_ID_5704CIOBE 0x00004000
+#define  GRC_MISC_CFG_BOARD_ID_5704_A2	0x00008000
+#define  GRC_MISC_CFG_BOARD_ID_5788	0x00010000
+#define  GRC_MISC_CFG_BOARD_ID_5788M	0x00018000
+#define  GRC_MISC_CFG_BOARD_ID_AC91002A1 0x00018000
+#define  GRC_MISC_CFG_EPHY_IDDQ		0x00200000
+#define  GRC_MISC_CFG_KEEP_GPHY_POWER	0x04000000
+#define GRC_LOCAL_CTRL			0x00006808
+#define  GRC_LCLCTRL_INT_ACTIVE		0x00000001
+#define  GRC_LCLCTRL_CLEARINT		0x00000002
+#define  GRC_LCLCTRL_SETINT		0x00000004
+#define  GRC_LCLCTRL_INT_ON_ATTN	0x00000008
+#define  GRC_LCLCTRL_GPIO_UART_SEL	0x00000010	/* 5755 only */
+#define  GRC_LCLCTRL_USE_SIG_DETECT	0x00000010	/* 5714/5780 only */
+#define  GRC_LCLCTRL_USE_EXT_SIG_DETECT	0x00000020	/* 5714/5780 only */
+#define  GRC_LCLCTRL_GPIO_INPUT3	0x00000020
+#define  GRC_LCLCTRL_GPIO_OE3		0x00000040
+#define  GRC_LCLCTRL_GPIO_OUTPUT3	0x00000080
+#define  GRC_LCLCTRL_GPIO_INPUT0	0x00000100
+#define  GRC_LCLCTRL_GPIO_INPUT1	0x00000200
+#define  GRC_LCLCTRL_GPIO_INPUT2	0x00000400
+#define  GRC_LCLCTRL_GPIO_OE0		0x00000800
+#define  GRC_LCLCTRL_GPIO_OE1		0x00001000
+#define  GRC_LCLCTRL_GPIO_OE2		0x00002000
+#define  GRC_LCLCTRL_GPIO_OUTPUT0	0x00004000
+#define  GRC_LCLCTRL_GPIO_OUTPUT1	0x00008000
+#define  GRC_LCLCTRL_GPIO_OUTPUT2	0x00010000
+#define  GRC_LCLCTRL_EXTMEM_ENABLE	0x00020000
+#define  GRC_LCLCTRL_MEMSZ_MASK		0x001c0000
+#define  GRC_LCLCTRL_MEMSZ_256K		0x00000000
+#define  GRC_LCLCTRL_MEMSZ_512K		0x00040000
+#define  GRC_LCLCTRL_MEMSZ_1M		0x00080000
+#define  GRC_LCLCTRL_MEMSZ_2M		0x000c0000
+#define  GRC_LCLCTRL_MEMSZ_4M		0x00100000
+#define  GRC_LCLCTRL_MEMSZ_8M		0x00140000
+#define  GRC_LCLCTRL_MEMSZ_16M		0x00180000
+#define  GRC_LCLCTRL_BANK_SELECT	0x00200000
+#define  GRC_LCLCTRL_SSRAM_TYPE		0x00400000
+#define  GRC_LCLCTRL_AUTO_SEEPROM	0x01000000
+#define GRC_TIMER			0x0000680c
+#define GRC_RX_CPU_EVENT		0x00006810
+#define  GRC_RX_CPU_DRIVER_EVENT	0x00004000
+#define GRC_RX_TIMER_REF		0x00006814
+#define GRC_RX_CPU_SEM			0x00006818
+#define GRC_REMOTE_RX_CPU_ATTN		0x0000681c
+#define GRC_TX_CPU_EVENT		0x00006820
+#define GRC_TX_TIMER_REF		0x00006824
+#define GRC_TX_CPU_SEM			0x00006828
+#define GRC_REMOTE_TX_CPU_ATTN		0x0000682c
+#define GRC_MEM_POWER_UP		0x00006830 /* 64-bit */
+#define GRC_EEPROM_ADDR			0x00006838
+#define  EEPROM_ADDR_WRITE		0x00000000
+#define  EEPROM_ADDR_READ		0x80000000
+#define  EEPROM_ADDR_COMPLETE		0x40000000
+#define  EEPROM_ADDR_FSM_RESET		0x20000000
+#define  EEPROM_ADDR_DEVID_MASK		0x1c000000
+#define  EEPROM_ADDR_DEVID_SHIFT	26
+#define  EEPROM_ADDR_START		0x02000000
+#define  EEPROM_ADDR_CLKPERD_SHIFT	16
+#define  EEPROM_ADDR_ADDR_MASK		0x0000ffff
+#define  EEPROM_ADDR_ADDR_SHIFT		0
+#define  EEPROM_DEFAULT_CLOCK_PERIOD	0x60
+#define  EEPROM_CHIP_SIZE		(64 * 1024)
+#define GRC_EEPROM_DATA			0x0000683c
+#define GRC_EEPROM_CTRL			0x00006840
+#define GRC_MDI_CTRL			0x00006844
+#define GRC_SEEPROM_DELAY		0x00006848
+/* 0x684c --> 0x6890 unused */
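+
+/*
+ * Editorial sketch, not part of the original file: a legacy EEPROM read is
+ * started by composing GRC_EEPROM_ADDR and polling for completion; the
+ * word then appears in GRC_EEPROM_DATA.  Simplified (the clock-period
+ * field is set explicitly here); assumes tw32()/tr32() and udelay().
+ */
+static inline int tg3_example_eeprom_read(struct tg3 *tp, u32 offset, u32 *val)
+{
+	int i;
+
+	tw32(GRC_EEPROM_ADDR,
+	     ((offset << EEPROM_ADDR_ADDR_SHIFT) & EEPROM_ADDR_ADDR_MASK) |
+	     (EEPROM_DEFAULT_CLOCK_PERIOD << EEPROM_ADDR_CLKPERD_SHIFT) |
+	     EEPROM_ADDR_READ | EEPROM_ADDR_START);
+
+	for (i = 0; i < 1000; i++) {
+		if (tr32(GRC_EEPROM_ADDR) & EEPROM_ADDR_COMPLETE) {
+			*val = tr32(GRC_EEPROM_DATA);
+			return 0;
+		}
+		udelay(100);
+	}
+	return -EBUSY;
+}
+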
+#define GRC_VCPU_EXT_CTRL		0x00006890
+#define GRC_VCPU_EXT_CTRL_HALT_CPU	 0x00400000
+#define GRC_VCPU_EXT_CTRL_DISABLE_WOL	 0x20000000
+#define GRC_FASTBOOT_PC			0x00006894	/* 5752, 5755, 5787 */
+
+/* 0x6c00 --> 0x7000 unused */
+
+/* NVRAM Control registers */
+#define NVRAM_CMD			0x00007000
+#define  NVRAM_CMD_RESET		 0x00000001
+#define  NVRAM_CMD_DONE			 0x00000008
+#define  NVRAM_CMD_GO			 0x00000010
+#define  NVRAM_CMD_WR			 0x00000020
+#define  NVRAM_CMD_RD			 0x00000000
+#define  NVRAM_CMD_ERASE		 0x00000040
+#define  NVRAM_CMD_FIRST		 0x00000080
+#define  NVRAM_CMD_LAST			 0x00000100
+#define  NVRAM_CMD_WREN			 0x00010000
+#define  NVRAM_CMD_WRDI			 0x00020000
+#define NVRAM_STAT			0x00007004
+#define NVRAM_WRDATA			0x00007008
+#define NVRAM_ADDR			0x0000700c
+#define  NVRAM_ADDR_MSK			0x00ffffff
+#define NVRAM_RDDATA			0x00007010
+#define NVRAM_CFG1			0x00007014
+#define  NVRAM_CFG1_FLASHIF_ENAB	 0x00000001
+#define  NVRAM_CFG1_BUFFERED_MODE	 0x00000002
+#define  NVRAM_CFG1_PASS_THRU		 0x00000004
+#define  NVRAM_CFG1_STATUS_BITS		 0x00000070
+#define  NVRAM_CFG1_BIT_BANG		 0x00000008
+#define  NVRAM_CFG1_FLASH_SIZE		 0x02000000
+#define  NVRAM_CFG1_COMPAT_BYPASS	 0x80000000
+#define  NVRAM_CFG1_VENDOR_MASK		 0x03000003
+#define  FLASH_VENDOR_ATMEL_EEPROM	 0x02000000
+#define  FLASH_VENDOR_ATMEL_FLASH_BUFFERED	 0x02000003
+#define  FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED	 0x00000003
+#define  FLASH_VENDOR_ST			 0x03000001
+#define  FLASH_VENDOR_SAIFUN		 0x01000003
+#define  FLASH_VENDOR_SST_SMALL		 0x00000001
+#define  FLASH_VENDOR_SST_LARGE		 0x02000001
+#define  NVRAM_CFG1_5752VENDOR_MASK	 0x03c00003
+#define  FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ	 0x00000000
+#define  FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ	 0x02000000
+#define  FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED	 0x02000003
+#define  FLASH_5752VENDOR_ST_M45PE10	 0x02400000
+#define  FLASH_5752VENDOR_ST_M45PE20	 0x02400002
+#define  FLASH_5752VENDOR_ST_M45PE40	 0x02400001
+#define  FLASH_5755VENDOR_ATMEL_FLASH_1	 0x03400001
+#define  FLASH_5755VENDOR_ATMEL_FLASH_2	 0x03400002
+#define  FLASH_5755VENDOR_ATMEL_FLASH_3	 0x03400000
+#define  FLASH_5755VENDOR_ATMEL_FLASH_4	 0x00000003
+#define  FLASH_5755VENDOR_ATMEL_FLASH_5	 0x02000003
+#define  FLASH_5755VENDOR_ATMEL_EEPROM_64KHZ	 0x03c00003
+#define  FLASH_5755VENDOR_ATMEL_EEPROM_376KHZ	 0x03c00002
+#define  FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ	 0x03000003
+#define  FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ	 0x03000002
+#define  FLASH_5787VENDOR_MICRO_EEPROM_64KHZ	 0x03000000
+#define  FLASH_5787VENDOR_MICRO_EEPROM_376KHZ	 0x02000000
+#define  FLASH_5761VENDOR_ATMEL_MDB021D	 0x00800003
+#define  FLASH_5761VENDOR_ATMEL_MDB041D	 0x00800000
+#define  FLASH_5761VENDOR_ATMEL_MDB081D	 0x00800002
+#define  FLASH_5761VENDOR_ATMEL_MDB161D	 0x00800001
+#define  FLASH_5761VENDOR_ATMEL_ADB021D	 0x00000003
+#define  FLASH_5761VENDOR_ATMEL_ADB041D	 0x00000000
+#define  FLASH_5761VENDOR_ATMEL_ADB081D	 0x00000002
+#define  FLASH_5761VENDOR_ATMEL_ADB161D	 0x00000001
+#define  FLASH_5761VENDOR_ST_M_M45PE20	 0x02800001
+#define  FLASH_5761VENDOR_ST_M_M45PE40	 0x02800000
+#define  FLASH_5761VENDOR_ST_M_M45PE80	 0x02800002
+#define  FLASH_5761VENDOR_ST_M_M45PE16	 0x02800003
+#define  FLASH_5761VENDOR_ST_A_M45PE20	 0x02000001
+#define  FLASH_5761VENDOR_ST_A_M45PE40	 0x02000000
+#define  FLASH_5761VENDOR_ST_A_M45PE80	 0x02000002
+#define  FLASH_5761VENDOR_ST_A_M45PE16	 0x02000003
+#define  FLASH_57780VENDOR_ATMEL_AT45DB011D 0x00400000
+#define  FLASH_57780VENDOR_ATMEL_AT45DB011B 0x03400000
+#define  FLASH_57780VENDOR_ATMEL_AT45DB021D 0x00400002
+#define  FLASH_57780VENDOR_ATMEL_AT45DB021B 0x03400002
+#define  FLASH_57780VENDOR_ATMEL_AT45DB041D 0x00400001
+#define  FLASH_57780VENDOR_ATMEL_AT45DB041B 0x03400001
+#define  FLASH_5717VENDOR_ATMEL_EEPROM	 0x02000001
+#define  FLASH_5717VENDOR_MICRO_EEPROM	 0x02000003
+#define  FLASH_5717VENDOR_ATMEL_MDB011D	 0x01000001
+#define  FLASH_5717VENDOR_ATMEL_MDB021D	 0x01000003
+#define  FLASH_5717VENDOR_ST_M_M25PE10	 0x02000000
+#define  FLASH_5717VENDOR_ST_M_M25PE20	 0x02000002
+#define  FLASH_5717VENDOR_ST_M_M45PE10	 0x00000001
+#define  FLASH_5717VENDOR_ST_M_M45PE20	 0x00000003
+#define  FLASH_5717VENDOR_ATMEL_ADB011B	 0x01400000
+#define  FLASH_5717VENDOR_ATMEL_ADB021B	 0x01400002
+#define  FLASH_5717VENDOR_ATMEL_ADB011D	 0x01400001
+#define  FLASH_5717VENDOR_ATMEL_ADB021D	 0x01400003
+#define  FLASH_5717VENDOR_ST_A_M25PE10	 0x02400000
+#define  FLASH_5717VENDOR_ST_A_M25PE20	 0x02400002
+#define  FLASH_5717VENDOR_ST_A_M45PE10	 0x02400001
+#define  FLASH_5717VENDOR_ST_A_M45PE20	 0x02400003
+#define  FLASH_5717VENDOR_ATMEL_45USPT	 0x03400000
+#define  FLASH_5717VENDOR_ST_25USPT	 0x03400002
+#define  FLASH_5717VENDOR_ST_45USPT	 0x03400001
+#define  FLASH_5720_EEPROM_HD		 0x00000001
+#define  FLASH_5720_EEPROM_LD		 0x00000003
+#define  FLASH_5720VENDOR_M_ATMEL_DB011D 0x01000000
+#define  FLASH_5720VENDOR_M_ATMEL_DB021D 0x01000002
+#define  FLASH_5720VENDOR_M_ATMEL_DB041D 0x01000001
+#define  FLASH_5720VENDOR_M_ATMEL_DB081D 0x01000003
+#define  FLASH_5720VENDOR_M_ST_M25PE10	 0x02000000
+#define  FLASH_5720VENDOR_M_ST_M25PE20	 0x02000002
+#define  FLASH_5720VENDOR_M_ST_M25PE40	 0x02000001
+#define  FLASH_5720VENDOR_M_ST_M25PE80	 0x02000003
+#define  FLASH_5720VENDOR_M_ST_M45PE10	 0x03000000
+#define  FLASH_5720VENDOR_M_ST_M45PE20	 0x03000002
+#define  FLASH_5720VENDOR_M_ST_M45PE40	 0x03000001
+#define  FLASH_5720VENDOR_M_ST_M45PE80	 0x03000003
+#define  FLASH_5720VENDOR_A_ATMEL_DB011B 0x01800000
+#define  FLASH_5720VENDOR_A_ATMEL_DB021B 0x01800002
+#define  FLASH_5720VENDOR_A_ATMEL_DB041B 0x01800001
+#define  FLASH_5720VENDOR_A_ATMEL_DB011D 0x01c00000
+#define  FLASH_5720VENDOR_A_ATMEL_DB021D 0x01c00002
+#define  FLASH_5720VENDOR_A_ATMEL_DB041D 0x01c00001
+#define  FLASH_5720VENDOR_A_ATMEL_DB081D 0x01c00003
+#define  FLASH_5720VENDOR_A_ST_M25PE10	 0x02800000
+#define  FLASH_5720VENDOR_A_ST_M25PE20	 0x02800002
+#define  FLASH_5720VENDOR_A_ST_M25PE40	 0x02800001
+#define  FLASH_5720VENDOR_A_ST_M25PE80	 0x02800003
+#define  FLASH_5720VENDOR_A_ST_M45PE10	 0x02c00000
+#define  FLASH_5720VENDOR_A_ST_M45PE20	 0x02c00002
+#define  FLASH_5720VENDOR_A_ST_M45PE40	 0x02c00001
+#define  FLASH_5720VENDOR_A_ST_M45PE80	 0x02c00003
+#define  FLASH_5720VENDOR_ATMEL_45USPT	 0x03c00000
+#define  FLASH_5720VENDOR_ST_25USPT	 0x03c00002
+#define  FLASH_5720VENDOR_ST_45USPT	 0x03c00001
+#define  NVRAM_CFG1_5752PAGE_SIZE_MASK	 0x70000000
+#define  FLASH_5752PAGE_SIZE_256	 0x00000000
+#define  FLASH_5752PAGE_SIZE_512	 0x10000000
+#define  FLASH_5752PAGE_SIZE_1K		 0x20000000
+#define  FLASH_5752PAGE_SIZE_2K		 0x30000000
+#define  FLASH_5752PAGE_SIZE_4K		 0x40000000
+#define  FLASH_5752PAGE_SIZE_264	 0x50000000
+#define  FLASH_5752PAGE_SIZE_528	 0x60000000
+#define NVRAM_CFG2			0x00007018
+#define NVRAM_CFG3			0x0000701c
+#define NVRAM_SWARB			0x00007020
+#define  SWARB_REQ_SET0			 0x00000001
+#define  SWARB_REQ_SET1			 0x00000002
+#define  SWARB_REQ_SET2			 0x00000004
+#define  SWARB_REQ_SET3			 0x00000008
+#define  SWARB_REQ_CLR0			 0x00000010
+#define  SWARB_REQ_CLR1			 0x00000020
+#define  SWARB_REQ_CLR2			 0x00000040
+#define  SWARB_REQ_CLR3			 0x00000080
+#define  SWARB_GNT0			 0x00000100
+#define  SWARB_GNT1			 0x00000200
+#define  SWARB_GNT2			 0x00000400
+#define  SWARB_GNT3			 0x00000800
+#define  SWARB_REQ0			 0x00001000
+#define  SWARB_REQ1			 0x00002000
+#define  SWARB_REQ2			 0x00004000
+#define  SWARB_REQ3			 0x00008000
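+
+/*
+ * Editorial sketch, not part of the original file: software arbitration
+ * for the NVRAM interface - request one of the four arbitration slots and
+ * poll for the matching grant, backing out on timeout.  Assumes
+ * tw32()/tr32() and udelay().
+ */
+static inline int tg3_example_nvram_lock(struct tg3 *tp)
+{
+	int i;
+
+	tw32(NVRAM_SWARB, SWARB_REQ_SET1);
+	for (i = 0; i < 8000; i++) {
+		if (tr32(NVRAM_SWARB) & SWARB_GNT1)
+			return 0;
+		udelay(20);
+	}
+	tw32(NVRAM_SWARB, SWARB_REQ_CLR1);	/* withdraw the request */
+	return -ENODEV;
+}
+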
+#define NVRAM_ACCESS			0x00007024
+#define  ACCESS_ENABLE			 0x00000001
+#define  ACCESS_WR_ENABLE		 0x00000002
+#define NVRAM_WRITE1			0x00007028
+/* 0x702c unused */
+
+#define NVRAM_ADDR_LOCKOUT		0x00007030
+/* 0x7034 --> 0x7500 unused */
+
+#define OTP_MODE			0x00007500
+#define OTP_MODE_OTP_THRU_GRC		 0x00000001
+#define OTP_CTRL			0x00007504
+#define OTP_CTRL_OTP_PROG_ENABLE	 0x00200000
+#define OTP_CTRL_OTP_CMD_READ		 0x00000000
+#define OTP_CTRL_OTP_CMD_INIT		 0x00000008
+#define OTP_CTRL_OTP_CMD_START		 0x00000001
+#define OTP_STATUS			0x00007508
+#define OTP_STATUS_CMD_DONE		 0x00000001
+#define OTP_ADDRESS			0x0000750c
+#define OTP_ADDRESS_MAGIC1		 0x000000a0
+#define OTP_ADDRESS_MAGIC2		 0x00000080
+/* 0x7510 unused */
+
+#define OTP_READ_DATA			0x00007514
+/* 0x7518 --> 0x7c04 unused */
+
+#define PCIE_TRANSACTION_CFG		0x00007c04
+#define PCIE_TRANS_CFG_1SHOT_MSI	 0x20000000
+#define PCIE_TRANS_CFG_LOM		 0x00000020
+/* 0x7c08 --> 0x7d28 unused */
+
+#define PCIE_PWR_MGMT_THRESH		0x00007d28
+#define PCIE_PWR_MGMT_L1_THRESH_MSK	 0x0000ff00
+#define PCIE_PWR_MGMT_L1_THRESH_4MS	 0x0000ff00
+#define PCIE_PWR_MGMT_EXT_ASPM_TMR_EN	 0x01000000
+/* 0x7d2c --> 0x7d54 unused */
+
+#define TG3_PCIE_LNKCTL			0x00007d54
+#define  TG3_PCIE_LNKCTL_L1_PLL_PD_EN	 0x00000008
+#define  TG3_PCIE_LNKCTL_L1_PLL_PD_DIS	 0x00000080
+/* 0x7d58 --> 0x7e2c unused */
+
+#define TG3_PCIE_PHY_TSTCTL		0x00007e2c
+#define  TG3_PCIE_PHY_TSTCTL_PCIE10	 0x00000040
+#define  TG3_PCIE_PHY_TSTCTL_PSCRAM	 0x00000020
+
+#define TG3_PCIE_EIDLE_DELAY		0x00007e70
+#define  TG3_PCIE_EIDLE_DELAY_MASK	 0x0000001f
+#define  TG3_PCIE_EIDLE_DELAY_13_CLKS	 0x0000000c
+/* 0x7e74 --> 0x8000 unused */
+
+
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT		0x00007c00
+#define TG3_PCIE_DL_LO_FTSMAX		0x0000000c
+#define TG3_PCIE_DL_LO_FTSMAX_MSK	0x000000ff
+#define TG3_PCIE_DL_LO_FTSMAX_VAL	0x0000002c
+#define TG3_PCIE_PL_LO_PHYCTL1		 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN	  0x00001000
+#define TG3_PCIE_PL_LO_PHYCTL5		 0x00000014
+#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ	  0x80000000
+
+#define TG3_REG_BLK_SIZE		0x00008000
+
+/* OTP bit definitions */
+#define TG3_OTP_AGCTGT_MASK		0x000000e0
+#define TG3_OTP_AGCTGT_SHIFT		1
+#define TG3_OTP_HPFFLTR_MASK		0x00000300
+#define TG3_OTP_HPFFLTR_SHIFT		1
+#define TG3_OTP_HPFOVER_MASK		0x00000400
+#define TG3_OTP_HPFOVER_SHIFT		1
+#define TG3_OTP_LPFDIS_MASK		0x00000800
+#define TG3_OTP_LPFDIS_SHIFT		11
+#define TG3_OTP_VDAC_MASK		0xff000000
+#define TG3_OTP_VDAC_SHIFT		24
+#define TG3_OTP_10BTAMP_MASK		0x0000f000
+#define TG3_OTP_10BTAMP_SHIFT		8
+#define TG3_OTP_ROFF_MASK		0x00e00000
+#define TG3_OTP_ROFF_SHIFT		11
+#define TG3_OTP_RCOFF_MASK		0x001c0000
+#define TG3_OTP_RCOFF_SHIFT		16
+
+#define TG3_OTP_DEFAULT			0x286c1640
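+
+/*
+ * Editorial sketch, not part of the original file: OTP fields are
+ * recovered with the mask/shift pairs above, e.g. the VDAC trim value
+ * out of an OTP word.
+ */
+static inline u32 tg3_example_otp_vdac(u32 otp)
+{
+	return (otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT;
+}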
+
+
+/* Hardware Legacy NVRAM layout */
+#define TG3_NVM_VPD_OFF			0x100
+#define TG3_NVM_VPD_LEN			256
+
+/* Hardware Selfboot NVRAM layout */
+#define TG3_NVM_HWSB_CFG1		0x00000004
+#define  TG3_NVM_HWSB_CFG1_MAJMSK	0xf8000000
+#define  TG3_NVM_HWSB_CFG1_MAJSFT	27
+#define  TG3_NVM_HWSB_CFG1_MINMSK	0x07c00000
+#define  TG3_NVM_HWSB_CFG1_MINSFT	22
+
+#define TG3_EEPROM_MAGIC		0x669955aa
+#define TG3_EEPROM_MAGIC_FW		0xa5000000
+#define TG3_EEPROM_MAGIC_FW_MSK		0xff000000
+#define TG3_EEPROM_SB_FORMAT_MASK	0x00e00000
+#define TG3_EEPROM_SB_FORMAT_1		0x00200000
+#define TG3_EEPROM_SB_REVISION_MASK	0x001f0000
+#define TG3_EEPROM_SB_REVISION_0	0x00000000
+#define TG3_EEPROM_SB_REVISION_2	0x00020000
+#define TG3_EEPROM_SB_REVISION_3	0x00030000
+#define TG3_EEPROM_SB_REVISION_4	0x00040000
+#define TG3_EEPROM_SB_REVISION_5	0x00050000
+#define TG3_EEPROM_SB_REVISION_6	0x00060000
+#define TG3_EEPROM_MAGIC_HW		0xabcd
+#define TG3_EEPROM_MAGIC_HW_MSK		0xffff
+
+#define TG3_NVM_DIR_START		0x18
+#define TG3_NVM_DIR_END			0x78
+#define TG3_NVM_DIRENT_SIZE		0xc
+#define TG3_NVM_DIRTYPE_SHIFT		24
+#define TG3_NVM_DIRTYPE_LENMSK		0x003fffff
+#define TG3_NVM_DIRTYPE_ASFINI		1
+#define TG3_NVM_DIRTYPE_EXTVPD		20
+#define TG3_NVM_PTREV_BCVER		0x94
+#define TG3_NVM_BCVER_MAJMSK		0x0000ff00
+#define TG3_NVM_BCVER_MAJSFT		8
+#define TG3_NVM_BCVER_MINMSK		0x000000ff
+
+#define TG3_EEPROM_SB_F1R0_EDH_OFF	0x10
+#define TG3_EEPROM_SB_F1R2_EDH_OFF	0x14
+#define TG3_EEPROM_SB_F1R2_MBA_OFF	0x10
+#define TG3_EEPROM_SB_F1R3_EDH_OFF	0x18
+#define TG3_EEPROM_SB_F1R4_EDH_OFF	0x1c
+#define TG3_EEPROM_SB_F1R5_EDH_OFF	0x20
+#define TG3_EEPROM_SB_F1R6_EDH_OFF	0x4c
+#define TG3_EEPROM_SB_EDH_MAJ_MASK	0x00000700
+#define TG3_EEPROM_SB_EDH_MAJ_SHFT	8
+#define TG3_EEPROM_SB_EDH_MIN_MASK	0x000000ff
+#define TG3_EEPROM_SB_EDH_BLD_MASK	0x0000f800
+#define TG3_EEPROM_SB_EDH_BLD_SHFT	11
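+
+/*
+ * Editorial sketch, not part of the original file: decoding a selfboot
+ * extended-directory-header word into major/minor/build numbers with the
+ * masks above.  The output pointers are hypothetical.
+ */
+static inline void tg3_example_sb_ver(u32 edh, u32 *maj, u32 *min, u32 *bld)
+{
+	*maj = (edh & TG3_EEPROM_SB_EDH_MAJ_MASK) >> TG3_EEPROM_SB_EDH_MAJ_SHFT;
+	*min = edh & TG3_EEPROM_SB_EDH_MIN_MASK;
+	*bld = (edh & TG3_EEPROM_SB_EDH_BLD_MASK) >> TG3_EEPROM_SB_EDH_BLD_SHFT;
+}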
+
+
+/* 32K Window into NIC internal memory */
+#define NIC_SRAM_WIN_BASE		0x00008000
+
+/* Offsets into first 32k of NIC internal memory. */
+#define NIC_SRAM_PAGE_ZERO		0x00000000
+#define NIC_SRAM_SEND_RCB		0x00000100 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_RCV_RET_RCB		0x00000200 /* 16 * TG3_BDINFO_... */
+#define NIC_SRAM_STATS_BLK		0x00000300
+#define NIC_SRAM_STATUS_BLK		0x00000b00
+
+#define NIC_SRAM_FIRMWARE_MBOX		0x00000b50
+#define  NIC_SRAM_FIRMWARE_MBOX_MAGIC1	 0x4B657654
+#define  NIC_SRAM_FIRMWARE_MBOX_MAGIC2	 0x4861764b /* !dma on linkchg */
+
+#define NIC_SRAM_DATA_SIG		0x00000b54
+#define  NIC_SRAM_DATA_SIG_MAGIC	 0x4b657654 /* ascii for 'KevT' */
+
+#define NIC_SRAM_DATA_CFG			0x00000b58
+#define  NIC_SRAM_DATA_CFG_LED_MODE_MASK	 0x0000000c
+#define  NIC_SRAM_DATA_CFG_LED_MODE_MAC		 0x00000000
+#define  NIC_SRAM_DATA_CFG_LED_MODE_PHY_1	 0x00000004
+#define  NIC_SRAM_DATA_CFG_LED_MODE_PHY_2	 0x00000008
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_MASK	 0x00000030
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_UNKNOWN	 0x00000000
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_COPPER	 0x00000010
+#define  NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER	 0x00000020
+#define  NIC_SRAM_DATA_CFG_WOL_ENABLE		 0x00000040
+#define  NIC_SRAM_DATA_CFG_ASF_ENABLE		 0x00000080
+#define  NIC_SRAM_DATA_CFG_EEPROM_WP		 0x00000100
+#define  NIC_SRAM_DATA_CFG_MINI_PCI		 0x00001000
+#define  NIC_SRAM_DATA_CFG_FIBER_WOL		 0x00004000
+#define  NIC_SRAM_DATA_CFG_NO_GPIO2		 0x00100000
+#define  NIC_SRAM_DATA_CFG_APE_ENABLE		 0x00200000
+
+#define NIC_SRAM_DATA_VER			0x00000b5c
+#define  NIC_SRAM_DATA_VER_SHIFT		 16
+
+#define NIC_SRAM_DATA_PHY_ID		0x00000b74
+#define  NIC_SRAM_DATA_PHY_ID1_MASK	 0xffff0000
+#define  NIC_SRAM_DATA_PHY_ID2_MASK	 0x0000ffff
+
+#define NIC_SRAM_FW_CMD_MBOX		0x00000b78
+#define  FWCMD_NICDRV_ALIVE		 0x00000001
+#define  FWCMD_NICDRV_PAUSE_FW		 0x00000002
+#define  FWCMD_NICDRV_IPV4ADDR_CHG	 0x00000003
+#define  FWCMD_NICDRV_IPV6ADDR_CHG	 0x00000004
+#define  FWCMD_NICDRV_FIX_DMAR		 0x00000005
+#define  FWCMD_NICDRV_FIX_DMAW		 0x00000006
+#define  FWCMD_NICDRV_LINK_UPDATE	 0x0000000c
+#define  FWCMD_NICDRV_ALIVE2		 0x0000000d
+#define  FWCMD_NICDRV_ALIVE3		 0x0000000e
+#define NIC_SRAM_FW_CMD_LEN_MBOX	0x00000b7c
+#define NIC_SRAM_FW_CMD_DATA_MBOX	0x00000b80
+#define NIC_SRAM_FW_ASF_STATUS_MBOX	0x00000c00
+#define NIC_SRAM_FW_DRV_STATE_MBOX	0x00000c04
+#define  DRV_STATE_START		 0x00000001
+#define  DRV_STATE_START_DONE		 0x80000001
+#define  DRV_STATE_UNLOAD		 0x00000002
+#define  DRV_STATE_UNLOAD_DONE		 0x80000002
+#define  DRV_STATE_WOL			 0x00000003
+#define  DRV_STATE_SUSPEND		 0x00000004
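+
+/*
+ * Editorial sketch, not part of the original file: driver-to-firmware
+ * commands go through the NIC_SRAM_FW_CMD_* mailboxes and are signalled
+ * by raising the driver event bit in GRC_RX_CPU_EVENT.  Assumes the
+ * driver's tg3_write_mem() helper and the tr32()/tw32_f() accessors;
+ * the heartbeat interval written here is illustrative.
+ */
+static inline void tg3_example_fw_alive(struct tg3 *tp)
+{
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE3);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
+	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
+	tw32_f(GRC_RX_CPU_EVENT,
+	       tr32(GRC_RX_CPU_EVENT) | GRC_RX_CPU_DRIVER_EVENT);
+}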
+
+#define NIC_SRAM_FW_RESET_TYPE_MBOX	0x00000c08
+
+#define NIC_SRAM_MAC_ADDR_HIGH_MBOX	0x00000c14
+#define NIC_SRAM_MAC_ADDR_LOW_MBOX	0x00000c18
+
+#define NIC_SRAM_WOL_MBOX		0x00000d30
+#define  WOL_SIGNATURE			 0x474c0000
+#define  WOL_DRV_STATE_SHUTDOWN		 0x00000001
+#define  WOL_DRV_WOL			 0x00000002
+#define  WOL_SET_MAGIC_PKT		 0x00000004
+
+#define NIC_SRAM_DATA_CFG_2		0x00000d38
+#define  NIC_SRAM_DATA_CFG_2_APD_EN	 0x00000400
+#define  SHASTA_EXT_LED_MODE_MASK	 0x00018000
+#define  SHASTA_EXT_LED_LEGACY		 0x00000000
+#define  SHASTA_EXT_LED_SHARED		 0x00008000
+#define  SHASTA_EXT_LED_MAC		 0x00010000
+#define  SHASTA_EXT_LED_COMBO		 0x00018000
+
+#define NIC_SRAM_DATA_CFG_3		0x00000d3c
+#define  NIC_SRAM_ASPM_DEBOUNCE		 0x00000002
+
+#define NIC_SRAM_DATA_CFG_4		0x00000d60
+#define  NIC_SRAM_GMII_MODE		 0x00000002
+#define  NIC_SRAM_RGMII_INBAND_DISABLE	 0x00000004
+#define  NIC_SRAM_RGMII_EXT_IBND_RX_EN	 0x00000008
+#define  NIC_SRAM_RGMII_EXT_IBND_TX_EN	 0x00000010
+
+#define NIC_SRAM_RX_MINI_BUFFER_DESC	0x00001000
+
+#define NIC_SRAM_DMA_DESC_POOL_BASE	0x00002000
+#define  NIC_SRAM_DMA_DESC_POOL_SIZE	 0x00002000
+#define NIC_SRAM_TX_BUFFER_DESC		0x00004000 /* 512 entries */
+#define NIC_SRAM_RX_BUFFER_DESC		0x00006000 /* 256 entries */
+#define NIC_SRAM_RX_JUMBO_BUFFER_DESC	0x00007000 /* 256 entries */
+#define NIC_SRAM_MBUF_POOL_BASE		0x00008000
+#define  NIC_SRAM_MBUF_POOL_SIZE96	 0x00018000
+#define  NIC_SRAM_MBUF_POOL_SIZE64	 0x00010000
+#define  NIC_SRAM_MBUF_POOL_BASE5705	0x00010000
+#define  NIC_SRAM_MBUF_POOL_SIZE5705	0x0000e000
+
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5700	128
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5755	64
+#define TG3_SRAM_RX_STD_BDCACHE_SIZE_5906	32
+
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700	64
+#define TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717	16
+
+
+/* Currently this is fixed. */
+#define TG3_PHY_MII_ADDR		0x01
+
+
+/*** Tigon3 specific PHY MII registers. ***/
+#define MII_TG3_MMD_CTRL		0x0d /* MMD Access Control register */
+#define MII_TG3_MMD_CTRL_DATA_NOINC	0x4000
+#define MII_TG3_MMD_ADDRESS		0x0e /* MMD Address Data register */
+
+#define MII_TG3_EXT_CTRL		0x10 /* Extended control register */
+#define  MII_TG3_EXT_CTRL_FIFO_ELASTIC	0x0001
+#define  MII_TG3_EXT_CTRL_LNK3_LED_MODE	0x0002
+#define  MII_TG3_EXT_CTRL_FORCE_LED_OFF	0x0008
+#define  MII_TG3_EXT_CTRL_TBI		0x8000
+
+#define MII_TG3_EXT_STAT		0x11 /* Extended status register */
+#define  MII_TG3_EXT_STAT_LPASS		0x0100
+
+#define MII_TG3_RXR_COUNTERS		0x14 /* Local/Remote Receiver Counts */
+#define MII_TG3_DSP_RW_PORT		0x15 /* DSP coefficient read/write port */
+#define MII_TG3_DSP_CONTROL		0x16 /* DSP control register */
+#define MII_TG3_DSP_ADDRESS		0x17 /* DSP address register */
+
+#define MII_TG3_DSP_TAP1		0x0001
+#define  MII_TG3_DSP_TAP1_AGCTGT_DFLT	0x0007
+#define MII_TG3_DSP_TAP26		0x001a
+#define  MII_TG3_DSP_TAP26_ALNOKO	0x0001
+#define  MII_TG3_DSP_TAP26_RMRXSTO	0x0002
+#define  MII_TG3_DSP_TAP26_OPCSINPT	0x0004
+#define MII_TG3_DSP_AADJ1CH0		0x001f
+#define MII_TG3_DSP_CH34TP2		0x4022
+#define MII_TG3_DSP_CH34TP2_HIBW01	0x01ff
+#define MII_TG3_DSP_AADJ1CH3		0x601f
+#define  MII_TG3_DSP_AADJ1CH3_ADCCKADJ	0x0002
+#define MII_TG3_DSP_EXP1_INT_STAT	0x0f01
+#define MII_TG3_DSP_EXP8		0x0f08
+#define  MII_TG3_DSP_EXP8_REJ2MHz	0x0001
+#define  MII_TG3_DSP_EXP8_AEDW		0x0200
+#define MII_TG3_DSP_EXP75		0x0f75
+#define MII_TG3_DSP_EXP96		0x0f96
+#define MII_TG3_DSP_EXP97		0x0f97
+
+#define MII_TG3_AUX_CTRL		0x18 /* auxiliary control register */
+
+#define MII_TG3_AUXCTL_SHDWSEL_AUXCTL	0x0000
+#define MII_TG3_AUXCTL_ACTL_TX_6DB	0x0400
+#define MII_TG3_AUXCTL_ACTL_SMDSP_ENA	0x0800
+#define MII_TG3_AUXCTL_ACTL_EXTPKTLEN	0x4000
+
+#define MII_TG3_AUXCTL_SHDWSEL_PWRCTL	0x0002
+#define MII_TG3_AUXCTL_PCTL_WOL_EN	0x0008
+#define MII_TG3_AUXCTL_PCTL_100TX_LPWR	0x0010
+#define MII_TG3_AUXCTL_PCTL_SPR_ISOLATE	0x0020
+#define MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC	0x0040
+#define MII_TG3_AUXCTL_PCTL_VREG_11V	0x0180
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISCTEST	0x0004
+
+#define MII_TG3_AUXCTL_SHDWSEL_MISC	0x0007
+#define MII_TG3_AUXCTL_MISC_WIRESPD_EN	0x0010
+#define MII_TG3_AUXCTL_MISC_FORCE_AMDIX	0x0200
+#define MII_TG3_AUXCTL_MISC_RDSEL_SHIFT	12
+#define MII_TG3_AUXCTL_MISC_WREN	0x8000
+
+
+#define MII_TG3_AUX_STAT		0x19 /* auxiliary status register */
+#define MII_TG3_AUX_STAT_LPASS		0x0004
+#define MII_TG3_AUX_STAT_SPDMASK	0x0700
+#define MII_TG3_AUX_STAT_10HALF		0x0100
+#define MII_TG3_AUX_STAT_10FULL		0x0200
+#define MII_TG3_AUX_STAT_100HALF	0x0300
+#define MII_TG3_AUX_STAT_100_4		0x0400
+#define MII_TG3_AUX_STAT_100FULL	0x0500
+#define MII_TG3_AUX_STAT_1000HALF	0x0600
+#define MII_TG3_AUX_STAT_1000FULL	0x0700
+#define MII_TG3_AUX_STAT_100		0x0008
+#define MII_TG3_AUX_STAT_FULL		0x0001
+
+#define MII_TG3_ISTAT			0x1a /* IRQ status register */
+#define MII_TG3_IMASK			0x1b /* IRQ mask register */
+
+/* ISTAT/IMASK event bits */
+#define MII_TG3_INT_LINKCHG		0x0002
+#define MII_TG3_INT_SPEEDCHG		0x0004
+#define MII_TG3_INT_DUPLEXCHG		0x0008
+#define MII_TG3_INT_ANEG_PAGE_RX	0x0400
+
+#define MII_TG3_MISC_SHDW		0x1c
+#define MII_TG3_MISC_SHDW_WREN		0x8000
+
+#define MII_TG3_MISC_SHDW_APD_WKTM_84MS	0x0001
+#define MII_TG3_MISC_SHDW_APD_ENABLE	0x0020
+#define MII_TG3_MISC_SHDW_APD_SEL	0x2800
+
+#define MII_TG3_MISC_SHDW_SCR5_C125OE	0x0001
+#define MII_TG3_MISC_SHDW_SCR5_DLLAPD	0x0002
+#define MII_TG3_MISC_SHDW_SCR5_SDTL	0x0004
+#define MII_TG3_MISC_SHDW_SCR5_DLPTLM	0x0008
+#define MII_TG3_MISC_SHDW_SCR5_LPED	0x0010
+#define MII_TG3_MISC_SHDW_SCR5_SEL	0x1400
+
+#define MII_TG3_TEST1			0x1e
+#define MII_TG3_TEST1_TRIM_EN		0x0010
+#define MII_TG3_TEST1_CRC_EN		0x8000
+
+/* Clause 45 expansion registers */
+#define TG3_CL45_D7_EEERES_STAT		0x803e
+#define TG3_CL45_D7_EEERES_STAT_LP_100TX	0x0002
+#define TG3_CL45_D7_EEERES_STAT_LP_1000T	0x0004
+
+
+/* Fast Ethernet Transceiver definitions */
+#define MII_TG3_FET_PTEST		0x17
+#define  MII_TG3_FET_PTEST_FRC_TX_LINK	0x1000
+#define  MII_TG3_FET_PTEST_FRC_TX_LOCK	0x0800
+
+#define MII_TG3_FET_TEST		0x1f
+#define  MII_TG3_FET_SHADOW_EN		0x0080
+
+#define MII_TG3_FET_SHDW_MISCCTRL	0x10
+#define  MII_TG3_FET_SHDW_MISCCTRL_MDIX	0x4000
+
+#define MII_TG3_FET_SHDW_AUXMODE4	0x1a
+#define MII_TG3_FET_SHDW_AUXMODE4_SBPD	0x0008
+
+#define MII_TG3_FET_SHDW_AUXSTAT2	0x1b
+#define  MII_TG3_FET_SHDW_AUXSTAT2_APD	0x0020
+
+
+/* APE registers.  Accessible through BAR1 */
+#define TG3_APE_GPIO_MSG		0x0008
+#define TG3_APE_GPIO_MSG_SHIFT		4
+#define TG3_APE_EVENT			0x000c
+#define  APE_EVENT_1			 0x00000001
+#define TG3_APE_LOCK_REQ		0x002c
+#define  APE_LOCK_REQ_DRIVER		 0x00001000
+#define TG3_APE_LOCK_GRANT		0x004c
+#define  APE_LOCK_GRANT_DRIVER		 0x00001000
+#define TG3_APE_SEG_SIG			0x4000
+#define  APE_SEG_SIG_MAGIC		 0x41504521
+
+/* APE shared memory.  Accessible through BAR1 */
+#define TG3_APE_FW_STATUS		0x400c
+#define  APE_FW_STATUS_READY		 0x00000100
+#define TG3_APE_FW_FEATURES		0x4010
+#define  TG3_APE_FW_FEATURE_NCSI	 0x00000002
+#define TG3_APE_FW_VERSION		0x4018
+#define  APE_FW_VERSION_MAJMSK		 0xff000000
+#define  APE_FW_VERSION_MAJSFT		 24
+#define  APE_FW_VERSION_MINMSK		 0x00ff0000
+#define  APE_FW_VERSION_MINSFT		 16
+#define  APE_FW_VERSION_REVMSK		 0x0000ff00
+#define  APE_FW_VERSION_REVSFT		 8
+#define  APE_FW_VERSION_BLDMSK		 0x000000ff
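+
+/* A sketch of decoding this version word, assuming a
+ * tg3_ape_read32()-style BAR1 reader (tg3.c does the equivalent when
+ * it builds the ethtool firmware-version string):
+ *
+ *	u32 v = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
+ *	u32 maj = (v & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT;
+ *	u32 min = (v & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT;
+ *	u32 rev = (v & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT;
+ *	u32 bld =  v & APE_FW_VERSION_BLDMSK;
+ */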
+#define TG3_APE_HOST_SEG_SIG		0x4200
+#define  APE_HOST_SEG_SIG_MAGIC		 0x484f5354
+#define TG3_APE_HOST_SEG_LEN		0x4204
+#define  APE_HOST_SEG_LEN_MAGIC		 0x00000020
+#define TG3_APE_HOST_INIT_COUNT		0x4208
+#define TG3_APE_HOST_DRIVER_ID		0x420c
+#define  APE_HOST_DRIVER_ID_LINUX	 0xf0000000
+#define  APE_HOST_DRIVER_ID_MAGIC(maj, min)	\
+	(APE_HOST_DRIVER_ID_LINUX | (maj & 0xff) << 16 | (min & 0xff) << 8)
+#define TG3_APE_HOST_BEHAVIOR		0x4210
+#define  APE_HOST_BEHAV_NO_PHYLOCK	 0x00000001
+#define TG3_APE_HOST_HEARTBEAT_INT_MS	0x4214
+#define  APE_HOST_HEARTBEAT_INT_DISABLE	 0
+#define  APE_HOST_HEARTBEAT_INT_5SEC	 5000
+#define TG3_APE_HOST_HEARTBEAT_COUNT	0x4218
+#define TG3_APE_HOST_DRVR_STATE		0x421c
+#define TG3_APE_HOST_DRVR_STATE_START	 0x00000001
+#define TG3_APE_HOST_DRVR_STATE_UNLOAD	 0x00000002
+#define TG3_APE_HOST_DRVR_STATE_WOL	 0x00000003
+#define TG3_APE_HOST_WOL_SPEED		0x4224
+#define TG3_APE_HOST_WOL_SPEED_AUTO	 0x00008000
+
+#define TG3_APE_EVENT_STATUS		0x4300
+
+#define  APE_EVENT_STATUS_DRIVER_EVNT	 0x00000010
+#define  APE_EVENT_STATUS_STATE_CHNGE	 0x00000500
+#define  APE_EVENT_STATUS_STATE_START	 0x00010000
+#define  APE_EVENT_STATUS_STATE_UNLOAD	 0x00020000
+#define  APE_EVENT_STATUS_STATE_WOL	 0x00030000
+#define  APE_EVENT_STATUS_STATE_SUSPEND	 0x00040000
+#define  APE_EVENT_STATUS_EVENT_PENDING	 0x80000000
+
+#define TG3_APE_PER_LOCK_REQ		0x8400
+#define  APE_LOCK_PER_REQ_DRIVER	 0x00001000
+#define TG3_APE_PER_LOCK_GRANT		0x8420
+#define  APE_PER_LOCK_GRANT_DRIVER	 0x00001000
+
+/* APE convenience enumerations. */
+#define TG3_APE_LOCK_GRC                1
+#define TG3_APE_LOCK_MEM                4
+#define TG3_APE_LOCK_GPIO               7
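+
+/* A sketch of the request/grant handshake these registers implement,
+ * modeled on tg3.c's tg3_ape_lock() (tg3_ape_write32()/_read32()
+ * assumed; each lock uses a 4-byte slot at its enumerated index):
+ *
+ *	u32 off = 4 * locknum;
+ *
+ *	tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
+ *	for (i = 0; i < 100; i++) {
+ *		if (tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off) ==
+ *		    APE_LOCK_GRANT_DRIVER)
+ *			break;
+ *		udelay(10);
+ *	}
+ */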
+
+#define TG3_EEPROM_SB_F1R2_MBA_OFF	0x10
+
+
+/* There are two ways to manage the TX descriptors on the tigon3.
+ * Either the descriptors are in host DMA'able memory, or they
+ * exist only in the card's on-chip SRAM.  All 16 send BD rings use
+ * the same mode; they may not be configured individually.
+ *
+ * This driver always uses host memory TX descriptors.
+ *
+ * To use host memory TX descriptors:
+ *	1) Set GRC_MODE_HOST_SENDBDS in GRC_MODE register.
+ *	   Make sure GRC_MODE_4X_NIC_SEND_RINGS is clear.
+ *	2) Allocate DMA'able memory.
+ *	3) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ *	   a) Set TG3_BDINFO_HOST_ADDR to DMA address of memory
+ *	      obtained in step 2
+ *	   b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC.
+ *	   c) Set len field of TG3_BDINFO_MAXLEN_FLAGS to number
+ *	      of TX descriptors.  Leave flags field clear.
+ *	4) Access TX descriptors via host memory.  The chip
+ *	   will refetch into local SRAM as needed when producer
+ *	   index mailboxes are updated.
+ *
+ * To use on-chip TX descriptors:
+ *	1) Set GRC_MODE_4X_NIC_SEND_RINGS in GRC_MODE register.
+ *	   Make sure GRC_MODE_HOST_SENDBDS is clear.
+ *	2) In NIC_SRAM_SEND_RCB (of desired index) of on-chip SRAM:
+ *	   a) Set TG3_BDINFO_HOST_ADDR to zero.
+ *	   b) Set TG3_BDINFO_NIC_ADDR to NIC_SRAM_TX_BUFFER_DESC
+ *	   c) TG3_BDINFO_MAXLEN_FLAGS is don't care.
+ *	3) Access TX descriptors directly in on-chip SRAM
+ *	   using normal {read,write}l(), not by dereferencing
+ *	   pointers into ioremap()'d memory the way the broken
+ *	   Broadcom driver does.
+ *
+ * Note that BDINFO_FLAGS_DISABLED should be set in the flags field of
+ * TG3_BDINFO_MAXLEN_FLAGS of all unused SEND_RCB indices.
+ */
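+
+/* A minimal sketch of the host-memory path above for send ring 0,
+ * assuming a tg3_write_mem()-style indirect SRAM writer (the real
+ * one lives in tg3.c) and the TG3_BDINFO_* and NIC_SRAM_SEND_RCB
+ * offsets defined earlier in this file:
+ *
+ *	u32 rcb = NIC_SRAM_SEND_RCB;
+ *
+ *	tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
+ *	tw32(GRC_MODE, tp->grc_mode);
+ *	tg3_write_mem(tp, rcb + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
+ *		      (u32) ((u64) tnapi->tx_desc_mapping >> 32));
+ *	tg3_write_mem(tp, rcb + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
+ *		      (u32) (tnapi->tx_desc_mapping & 0xffffffff));
+ *	tg3_write_mem(tp, rcb + TG3_BDINFO_NIC_ADDR, NIC_SRAM_TX_BUFFER_DESC);
+ *	tg3_write_mem(tp, rcb + TG3_BDINFO_MAXLEN_FLAGS,
+ *		      TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
+ */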
+struct tg3_tx_buffer_desc {
+	u32				addr_hi;
+	u32				addr_lo;
+
+	u32				len_flags;
+#define TXD_FLAG_TCPUDP_CSUM		0x0001
+#define TXD_FLAG_IP_CSUM		0x0002
+#define TXD_FLAG_END			0x0004
+#define TXD_FLAG_IP_FRAG		0x0008
+#define TXD_FLAG_JMB_PKT		0x0008
+#define TXD_FLAG_IP_FRAG_END		0x0010
+#define TXD_FLAG_VLAN			0x0040
+#define TXD_FLAG_COAL_NOW		0x0080
+#define TXD_FLAG_CPU_PRE_DMA		0x0100
+#define TXD_FLAG_CPU_POST_DMA		0x0200
+#define TXD_FLAG_ADD_SRC_ADDR		0x1000
+#define TXD_FLAG_CHOOSE_SRC_ADDR	0x6000
+#define TXD_FLAG_NO_CRC			0x8000
+#define TXD_LEN_SHIFT			16
+
+	u32				vlan_tag;
+#define TXD_VLAN_TAG_SHIFT		0
+#define TXD_MSS_SHIFT			16
+};
+
+#define TXD_ADDR			0x00UL /* 64-bit */
+#define TXD_LEN_FLAGS			0x08UL /* 32-bit (upper 16-bits are len) */
+#define TXD_VLAN_TAG			0x0cUL /* 32-bit (upper 16-bits are tag) */
+#define TXD_SIZE			0x10UL
+
+struct tg3_rx_buffer_desc {
+	u32				addr_hi;
+	u32				addr_lo;
+
+	u32				idx_len;
+#define RXD_IDX_MASK	0xffff0000
+#define RXD_IDX_SHIFT	16
+#define RXD_LEN_MASK	0x0000ffff
+#define RXD_LEN_SHIFT	0
+
+	u32				type_flags;
+#define RXD_TYPE_SHIFT	16
+#define RXD_FLAGS_SHIFT	0
+
+#define RXD_FLAG_END			0x0004
+#define RXD_FLAG_MINI			0x0800
+#define RXD_FLAG_JUMBO			0x0020
+#define RXD_FLAG_VLAN			0x0040
+#define RXD_FLAG_ERROR			0x0400
+#define RXD_FLAG_IP_CSUM		0x1000
+#define RXD_FLAG_TCPUDP_CSUM		0x2000
+#define RXD_FLAG_IS_TCP			0x4000
+
+	u32				ip_tcp_csum;
+#define RXD_IPCSUM_MASK		0xffff0000
+#define RXD_IPCSUM_SHIFT	16
+#define RXD_TCPCSUM_MASK	0x0000ffff
+#define RXD_TCPCSUM_SHIFT	0
+
+	u32				err_vlan;
+
+#define RXD_VLAN_MASK			0x0000ffff
+
+#define RXD_ERR_BAD_CRC			0x00010000
+#define RXD_ERR_COLLISION		0x00020000
+#define RXD_ERR_LINK_LOST		0x00040000
+#define RXD_ERR_PHY_DECODE		0x00080000
+#define RXD_ERR_ODD_NIBBLE_RCVD_MII	0x00100000
+#define RXD_ERR_MAC_ABRT		0x00200000
+#define RXD_ERR_TOO_SMALL		0x00400000
+#define RXD_ERR_NO_RESOURCES		0x00800000
+#define RXD_ERR_HUGE_FRAME		0x01000000
+#define RXD_ERR_MASK			0xffff0000
+
+	u32				reserved;
+	u32				opaque;
+#define RXD_OPAQUE_INDEX_MASK		0x0000ffff
+#define RXD_OPAQUE_INDEX_SHIFT		0
+#define RXD_OPAQUE_RING_STD		0x00010000
+#define RXD_OPAQUE_RING_JUMBO		0x00020000
+#define RXD_OPAQUE_RING_MINI		0x00040000
+#define RXD_OPAQUE_RING_MASK		0x00070000
+};
+
+struct tg3_ext_rx_buffer_desc {
+	struct {
+		u32			addr_hi;
+		u32			addr_lo;
+	}				addrlist[3];
+	u32				len2_len1;
+	u32				resv_len3;
+	struct tg3_rx_buffer_desc	std;
+};
+
+/* We only use this when testing out the DMA engine
+ * at probe time.  This is the internal format of buffer
+ * descriptors used by the chip at NIC_SRAM_DMA_DESCS.
+ */
+struct tg3_internal_buffer_desc {
+	u32				addr_hi;
+	u32				addr_lo;
+	u32				nic_mbuf;
+	/* XXX FIX THIS */
+#ifdef __BIG_ENDIAN
+	u16				cqid_sqid;
+	u16				len;
+#else
+	u16				len;
+	u16				cqid_sqid;
+#endif
+	u32				flags;
+	u32				__cookie1;
+	u32				__cookie2;
+	u32				__cookie3;
+};
+
+#define TG3_HW_STATUS_SIZE		0x50
+struct tg3_hw_status {
+	u32				status;
+#define SD_STATUS_UPDATED		0x00000001
+#define SD_STATUS_LINK_CHG		0x00000002
+#define SD_STATUS_ERROR			0x00000004
+
+	u32				status_tag;
+
+#ifdef __BIG_ENDIAN
+	u16				rx_consumer;
+	u16				rx_jumbo_consumer;
+#else
+	u16				rx_jumbo_consumer;
+	u16				rx_consumer;
+#endif
+
+#ifdef __BIG_ENDIAN
+	u16				reserved;
+	u16				rx_mini_consumer;
+#else
+	u16				rx_mini_consumer;
+	u16				reserved;
+#endif
+	struct {
+#ifdef __BIG_ENDIAN
+		u16			tx_consumer;
+		u16			rx_producer;
+#else
+		u16			rx_producer;
+		u16			tx_consumer;
+#endif
+	}				idx[16];
+};
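+
+/* A sketch of how the driver polls this block for work, modeled on
+ * tg3.c's tg3_has_work() (tnapi refers to the tg3_napi structure
+ * defined later in this file):
+ *
+ *	struct tg3_hw_status *sblk = tnapi->hw_status;
+ *
+ *	if (sblk->status & SD_STATUS_LINK_CHG)
+ *		work_exists = 1;
+ *	if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
+ *	    *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
+ *		work_exists = 1;
+ */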
+
+typedef struct {
+	u32 high, low;
+} tg3_stat64_t;
+
+struct tg3_hw_stats {
+	u8				__reserved0[0x400-0x300];
+
+	/* Statistics maintained by Receive MAC. */
+	tg3_stat64_t			rx_octets;
+	u64				__reserved1;
+	tg3_stat64_t			rx_fragments;
+	tg3_stat64_t			rx_ucast_packets;
+	tg3_stat64_t			rx_mcast_packets;
+	tg3_stat64_t			rx_bcast_packets;
+	tg3_stat64_t			rx_fcs_errors;
+	tg3_stat64_t			rx_align_errors;
+	tg3_stat64_t			rx_xon_pause_rcvd;
+	tg3_stat64_t			rx_xoff_pause_rcvd;
+	tg3_stat64_t			rx_mac_ctrl_rcvd;
+	tg3_stat64_t			rx_xoff_entered;
+	tg3_stat64_t			rx_frame_too_long_errors;
+	tg3_stat64_t			rx_jabbers;
+	tg3_stat64_t			rx_undersize_packets;
+	tg3_stat64_t			rx_in_length_errors;
+	tg3_stat64_t			rx_out_length_errors;
+	tg3_stat64_t			rx_64_or_less_octet_packets;
+	tg3_stat64_t			rx_65_to_127_octet_packets;
+	tg3_stat64_t			rx_128_to_255_octet_packets;
+	tg3_stat64_t			rx_256_to_511_octet_packets;
+	tg3_stat64_t			rx_512_to_1023_octet_packets;
+	tg3_stat64_t			rx_1024_to_1522_octet_packets;
+	tg3_stat64_t			rx_1523_to_2047_octet_packets;
+	tg3_stat64_t			rx_2048_to_4095_octet_packets;
+	tg3_stat64_t			rx_4096_to_8191_octet_packets;
+	tg3_stat64_t			rx_8192_to_9022_octet_packets;
+
+	u64				__unused0[37];
+
+	/* Statistics maintained by Transmit MAC. */
+	tg3_stat64_t			tx_octets;
+	u64				__reserved2;
+	tg3_stat64_t			tx_collisions;
+	tg3_stat64_t			tx_xon_sent;
+	tg3_stat64_t			tx_xoff_sent;
+	tg3_stat64_t			tx_flow_control;
+	tg3_stat64_t			tx_mac_errors;
+	tg3_stat64_t			tx_single_collisions;
+	tg3_stat64_t			tx_mult_collisions;
+	tg3_stat64_t			tx_deferred;
+	u64				__reserved3;
+	tg3_stat64_t			tx_excessive_collisions;
+	tg3_stat64_t			tx_late_collisions;
+	tg3_stat64_t			tx_collide_2times;
+	tg3_stat64_t			tx_collide_3times;
+	tg3_stat64_t			tx_collide_4times;
+	tg3_stat64_t			tx_collide_5times;
+	tg3_stat64_t			tx_collide_6times;
+	tg3_stat64_t			tx_collide_7times;
+	tg3_stat64_t			tx_collide_8times;
+	tg3_stat64_t			tx_collide_9times;
+	tg3_stat64_t			tx_collide_10times;
+	tg3_stat64_t			tx_collide_11times;
+	tg3_stat64_t			tx_collide_12times;
+	tg3_stat64_t			tx_collide_13times;
+	tg3_stat64_t			tx_collide_14times;
+	tg3_stat64_t			tx_collide_15times;
+	tg3_stat64_t			tx_ucast_packets;
+	tg3_stat64_t			tx_mcast_packets;
+	tg3_stat64_t			tx_bcast_packets;
+	tg3_stat64_t			tx_carrier_sense_errors;
+	tg3_stat64_t			tx_discards;
+	tg3_stat64_t			tx_errors;
+
+	u64				__unused1[31];
+
+	/* Statistics maintained by Receive List Placement. */
+	tg3_stat64_t			COS_rx_packets[16];
+	tg3_stat64_t			COS_rx_filter_dropped;
+	tg3_stat64_t			dma_writeq_full;
+	tg3_stat64_t			dma_write_prioq_full;
+	tg3_stat64_t			rxbds_empty;
+	tg3_stat64_t			rx_discards;
+	tg3_stat64_t			rx_errors;
+	tg3_stat64_t			rx_threshold_hit;
+
+	u64				__unused2[9];
+
+	/* Statistics maintained by Send Data Initiator. */
+	tg3_stat64_t			COS_out_packets[16];
+	tg3_stat64_t			dma_readq_full;
+	tg3_stat64_t			dma_read_prioq_full;
+	tg3_stat64_t			tx_comp_queue_full;
+
+	/* Statistics maintained by Host Coalescing. */
+	tg3_stat64_t			ring_set_send_prod_index;
+	tg3_stat64_t			ring_status_update;
+	tg3_stat64_t			nic_irqs;
+	tg3_stat64_t			nic_avoided_irqs;
+	tg3_stat64_t			nic_tx_threshold_hit;
+
+	/* NOT a part of the hardware statistics block format.
+	 * These stats are here as storage for tg3_periodic_fetch_stats().
+	 */
+	tg3_stat64_t			mbuf_lwm_thresh_hit;
+
+	u8				__reserved4[0xb00-0x9c8];
+};
+
+/* 'mapping' is superfluous, as the chip does not write into
+ * the tx/rx post rings, so we could just fetch it from there.
+ * But the cache behavior is better the way we do it now.
+ */
+struct ring_info {
+	struct sk_buff			*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+};
+
+struct tg3_tx_ring_info {
+	struct sk_buff			*skb;
+	DEFINE_DMA_UNMAP_ADDR(mapping);
+	bool				fragmented;
+};
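+
+/* A sketch of the usual pairing via the generic DMA-unmap helpers
+ * (illustrative; tg3.c does the equivalent in its TX and RX paths):
+ *
+ *	txb->skb = skb;
+ *	dma_unmap_addr_set(txb, mapping, dma_addr);
+ *
+ * and later, on completion:
+ *
+ *	pci_unmap_single(tp->pdev, dma_unmap_addr(txb, mapping),
+ *			 skb_headlen(skb), PCI_DMA_TODEVICE);
+ */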
+
+struct tg3_link_config {
+	/* Describes what we're trying to get. */
+	u32				advertising;
+	u16				speed;
+	u8				duplex;
+	u8				autoneg;
+	u8				flowctrl;
+
+	/* Describes what we actually have. */
+	u8				active_flowctrl;
+
+	u8				active_duplex;
+#define SPEED_INVALID		0xffff
+#define DUPLEX_INVALID		0xff
+#define AUTONEG_INVALID		0xff
+	u16				active_speed;
+
+	/* When we go in and out of low power mode we need
+	 * to swap with this state.
+	 */
+	u16				orig_speed;
+	u8				orig_duplex;
+	u8				orig_autoneg;
+	u32				orig_advertising;
+};
+
+struct tg3_bufmgr_config {
+	u32		mbuf_read_dma_low_water;
+	u32		mbuf_mac_rx_low_water;
+	u32		mbuf_high_water;
+
+	u32		mbuf_read_dma_low_water_jumbo;
+	u32		mbuf_mac_rx_low_water_jumbo;
+	u32		mbuf_high_water_jumbo;
+
+	u32		dma_low_water;
+	u32		dma_high_water;
+};
+
+struct tg3_ethtool_stats {
+	/* Statistics maintained by Receive MAC. */
+	u64		rx_octets;
+	u64		rx_fragments;
+	u64		rx_ucast_packets;
+	u64		rx_mcast_packets;
+	u64		rx_bcast_packets;
+	u64		rx_fcs_errors;
+	u64		rx_align_errors;
+	u64		rx_xon_pause_rcvd;
+	u64		rx_xoff_pause_rcvd;
+	u64		rx_mac_ctrl_rcvd;
+	u64		rx_xoff_entered;
+	u64		rx_frame_too_long_errors;
+	u64		rx_jabbers;
+	u64		rx_undersize_packets;
+	u64		rx_in_length_errors;
+	u64		rx_out_length_errors;
+	u64		rx_64_or_less_octet_packets;
+	u64		rx_65_to_127_octet_packets;
+	u64		rx_128_to_255_octet_packets;
+	u64		rx_256_to_511_octet_packets;
+	u64		rx_512_to_1023_octet_packets;
+	u64		rx_1024_to_1522_octet_packets;
+	u64		rx_1523_to_2047_octet_packets;
+	u64		rx_2048_to_4095_octet_packets;
+	u64		rx_4096_to_8191_octet_packets;
+	u64		rx_8192_to_9022_octet_packets;
+
+	/* Statistics maintained by Transmit MAC. */
+	u64		tx_octets;
+	u64		tx_collisions;
+	u64		tx_xon_sent;
+	u64		tx_xoff_sent;
+	u64		tx_flow_control;
+	u64		tx_mac_errors;
+	u64		tx_single_collisions;
+	u64		tx_mult_collisions;
+	u64		tx_deferred;
+	u64		tx_excessive_collisions;
+	u64		tx_late_collisions;
+	u64		tx_collide_2times;
+	u64		tx_collide_3times;
+	u64		tx_collide_4times;
+	u64		tx_collide_5times;
+	u64		tx_collide_6times;
+	u64		tx_collide_7times;
+	u64		tx_collide_8times;
+	u64		tx_collide_9times;
+	u64		tx_collide_10times;
+	u64		tx_collide_11times;
+	u64		tx_collide_12times;
+	u64		tx_collide_13times;
+	u64		tx_collide_14times;
+	u64		tx_collide_15times;
+	u64		tx_ucast_packets;
+	u64		tx_mcast_packets;
+	u64		tx_bcast_packets;
+	u64		tx_carrier_sense_errors;
+	u64		tx_discards;
+	u64		tx_errors;
+
+	/* Statistics maintained by Receive List Placement. */
+	u64		dma_writeq_full;
+	u64		dma_write_prioq_full;
+	u64		rxbds_empty;
+	u64		rx_discards;
+	u64		rx_errors;
+	u64		rx_threshold_hit;
+
+	/* Statistics maintained by Send Data Initiator. */
+	u64		dma_readq_full;
+	u64		dma_read_prioq_full;
+	u64		tx_comp_queue_full;
+
+	/* Statistics maintained by Host Coalescing. */
+	u64		ring_set_send_prod_index;
+	u64		ring_status_update;
+	u64		nic_irqs;
+	u64		nic_avoided_irqs;
+	u64		nic_tx_threshold_hit;
+
+	u64		mbuf_lwm_thresh_hit;
+};
+
+struct tg3_rx_prodring_set {
+	u32				rx_std_prod_idx;
+	u32				rx_std_cons_idx;
+	u32				rx_jmb_prod_idx;
+	u32				rx_jmb_cons_idx;
+	struct tg3_rx_buffer_desc	*rx_std;
+	struct tg3_ext_rx_buffer_desc	*rx_jmb;
+	struct ring_info		*rx_std_buffers;
+	struct ring_info		*rx_jmb_buffers;
+	dma_addr_t			rx_std_mapping;
+	dma_addr_t			rx_jmb_mapping;
+};
+
+#define TG3_IRQ_MAX_VECS_RSS		5
+#define TG3_IRQ_MAX_VECS		TG3_IRQ_MAX_VECS_RSS
+
+struct tg3_napi {
+	struct napi_struct		napi	____cacheline_aligned;
+	struct tg3			*tp;
+	struct tg3_hw_status		*hw_status;
+
+	u32				chk_msi_cnt;
+	u32				last_tag;
+	u32				last_irq_tag;
+	u32				int_mbox;
+	u32				coal_now;
+
+	u32				consmbox ____cacheline_aligned;
+	u32				rx_rcb_ptr;
+	u32				last_rx_cons;
+	u16				*rx_rcb_prod_idx;
+	struct tg3_rx_prodring_set	prodring;
+	struct tg3_rx_buffer_desc	*rx_rcb;
+
+	u32				tx_prod	____cacheline_aligned;
+	u32				tx_cons;
+	u32				tx_pending;
+	u32				last_tx_cons;
+	u32				prodmbox;
+	struct tg3_tx_buffer_desc	*tx_ring;
+	struct tg3_tx_ring_info		*tx_buffers;
+
+	dma_addr_t			status_mapping;
+	dma_addr_t			rx_rcb_mapping;
+	dma_addr_t			tx_desc_mapping;
+
+	char				irq_lbl[IFNAMSIZ];
+	unsigned int			irq_vec;
+};
+
+enum TG3_FLAGS {
+	TG3_FLAG_TAGGED_STATUS = 0,
+	TG3_FLAG_TXD_MBOX_HWBUG,
+	TG3_FLAG_USE_LINKCHG_REG,
+	TG3_FLAG_ERROR_PROCESSED,
+	TG3_FLAG_ENABLE_ASF,
+	TG3_FLAG_ASPM_WORKAROUND,
+	TG3_FLAG_POLL_SERDES,
+	TG3_FLAG_MBOX_WRITE_REORDER,
+	TG3_FLAG_PCIX_TARGET_HWBUG,
+	TG3_FLAG_WOL_SPEED_100MB,
+	TG3_FLAG_WOL_ENABLE,
+	TG3_FLAG_EEPROM_WRITE_PROT,
+	TG3_FLAG_NVRAM,
+	TG3_FLAG_NVRAM_BUFFERED,
+	TG3_FLAG_SUPPORT_MSI,
+	TG3_FLAG_SUPPORT_MSIX,
+	TG3_FLAG_PCIX_MODE,
+	TG3_FLAG_PCI_HIGH_SPEED,
+	TG3_FLAG_PCI_32BIT,
+	TG3_FLAG_SRAM_USE_CONFIG,
+	TG3_FLAG_TX_RECOVERY_PENDING,
+	TG3_FLAG_WOL_CAP,
+	TG3_FLAG_JUMBO_RING_ENABLE,
+	TG3_FLAG_PAUSE_AUTONEG,
+	TG3_FLAG_CPMU_PRESENT,
+	TG3_FLAG_40BIT_DMA_BUG,
+	TG3_FLAG_BROKEN_CHECKSUMS,
+	TG3_FLAG_JUMBO_CAPABLE,
+	TG3_FLAG_CHIP_RESETTING,
+	TG3_FLAG_INIT_COMPLETE,
+	TG3_FLAG_RESTART_TIMER,
+	TG3_FLAG_TSO_BUG,
+	TG3_FLAG_IS_5788,
+	TG3_FLAG_MAX_RXPEND_64,
+	TG3_FLAG_TSO_CAPABLE,
+	TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
+	TG3_FLAG_ASF_NEW_HANDSHAKE,
+	TG3_FLAG_HW_AUTONEG,
+	TG3_FLAG_IS_NIC,
+	TG3_FLAG_FLASH,
+	TG3_FLAG_HW_TSO_1,
+	TG3_FLAG_5705_PLUS,
+	TG3_FLAG_5750_PLUS,
+	TG3_FLAG_HW_TSO_3,
+	TG3_FLAG_USING_MSI,
+	TG3_FLAG_USING_MSIX,
+	TG3_FLAG_ICH_WORKAROUND,
+	TG3_FLAG_5780_CLASS,
+	TG3_FLAG_HW_TSO_2,
+	TG3_FLAG_1SHOT_MSI,
+	TG3_FLAG_NO_FWARE_REPORTED,
+	TG3_FLAG_NO_NVRAM_ADDR_TRANS,
+	TG3_FLAG_ENABLE_APE,
+	TG3_FLAG_PROTECTED_NVRAM,
+	TG3_FLAG_5701_DMA_BUG,
+	TG3_FLAG_USE_PHYLIB,
+	TG3_FLAG_MDIOBUS_INITED,
+	TG3_FLAG_LRG_PROD_RING_CAP,
+	TG3_FLAG_RGMII_INBAND_DISABLE,
+	TG3_FLAG_RGMII_EXT_IBND_RX_EN,
+	TG3_FLAG_RGMII_EXT_IBND_TX_EN,
+	TG3_FLAG_CLKREQ_BUG,
+	TG3_FLAG_5755_PLUS,
+	TG3_FLAG_NO_NVRAM,
+	TG3_FLAG_ENABLE_RSS,
+	TG3_FLAG_ENABLE_TSS,
+	TG3_FLAG_SHORT_DMA_BUG,
+	TG3_FLAG_USE_JUMBO_BDFLAG,
+	TG3_FLAG_L1PLLPD_EN,
+	TG3_FLAG_57765_PLUS,
+	TG3_FLAG_APE_HAS_NCSI,
+	TG3_FLAG_5717_PLUS,
+	TG3_FLAG_4K_FIFO_LIMIT,
+
+	/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
+	TG3_FLAG_NUMBER_OF_FLAGS,	/* Last entry in enum TG3_FLAGS */
+};
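+
+/* These flags are kept in the tg3_flags bitmap in struct tg3 below
+ * and are manipulated with the standard bitops; tg3.c wraps them
+ * roughly as:
+ *
+ *	static inline int tg3_flag(struct tg3 *tp, enum TG3_FLAGS flag)
+ *	{
+ *		return test_bit(flag, tp->tg3_flags);
+ *	}
+ *
+ *	static inline void tg3_flag_set(struct tg3 *tp, enum TG3_FLAGS flag)
+ *	{
+ *		set_bit(flag, tp->tg3_flags);
+ *	}
+ */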
+
+struct tg3 {
+	/* begin "general, frequently-used members" cacheline section */
+
+	/* If the IRQ handler (which runs lockless) needs to be
+	 * quiesced, the following bitmask state is used.  The
+	 * SYNC flag is set by non-IRQ context code to initiate
+	 * the quiescence.
+	 *
+	 * When the IRQ handler notices that SYNC is set, it
+	 * disables interrupts and returns.
+	 *
+	 * When all outstanding IRQ handlers have returned after
+	 * the SYNC flag has been set, the setter can be assured
+	 * that interrupts will no longer get run.
+	 *
+	 * In this way, SMP driver locks are never acquired in
+	 * hw IRQ context, only in sw IRQ context or lower.
+	 */
+	unsigned int			irq_sync;
+
+	/* SMP locking strategy:
+	 *
+	 * lock: Held during reset, PHY access, timer, and when
+	 *       updating tg3_flags.
+	 *
+	 * netif_tx_lock: Held during tg3_start_xmit. tg3_tx holds
+	 *                netif_tx_lock when it needs to call
+	 *                netif_wake_queue.
+	 *
+	 * Both of these locks are to be held with BH safety.
+	 *
+	 * Because the IRQ handler, tg3_poll, and tg3_start_xmit
+	 * are running lockless, it is necessary to completely
+	 * quiesce the chip with tg3_netif_stop and tg3_full_lock
+	 * before reconfiguring the device.
+	 *
+	 * indirect_lock: Held when accessing registers indirectly
+	 *                with IRQ disabling.
+	 */
+	spinlock_t			lock;
+	spinlock_t			indirect_lock;
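+
+	/* A sketch of the quiesce handshake described above, modeled
+	 * on tg3_irq_quiesce()/tg3_full_lock() in tg3.c (illustrative
+	 * only):
+	 *
+	 *	tp->irq_sync = 1;
+	 *	smp_mb();
+	 *	for (i = 0; i < tp->irq_cnt; i++)
+	 *		synchronize_irq(tp->napi[i].irq_vec);
+	 *	spin_lock_bh(&tp->lock);
+	 */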
+
+	u32				(*read32) (struct tg3 *, u32);
+	void				(*write32) (struct tg3 *, u32, u32);
+	u32				(*read32_mbox) (struct tg3 *, u32);
+	void				(*write32_mbox) (struct tg3 *, u32,
+							 u32);
+	void __iomem			*regs;
+	void __iomem			*aperegs;
+	struct net_device		*dev;
+	struct pci_dev			*pdev;
+
+	u32				coal_now;
+	u32				msg_enable;
+
+	/* begin "tx thread" cacheline section */
+	void				(*write32_tx_mbox) (struct tg3 *, u32,
+							    u32);
+
+	/* begin "rx thread" cacheline section */
+	struct tg3_napi			napi[TG3_IRQ_MAX_VECS];
+	void				(*write32_rx_mbox) (struct tg3 *, u32,
+							    u32);
+	u32				rx_copy_thresh;
+	u32				rx_std_ring_mask;
+	u32				rx_jmb_ring_mask;
+	u32				rx_ret_ring_mask;
+	u32				rx_pending;
+	u32				rx_jumbo_pending;
+	u32				rx_std_max_post;
+	u32				rx_offset;
+	u32				rx_pkt_map_sz;
+
+
+	/* begin "everything else" cacheline(s) section */
+	unsigned long			rx_dropped;
+	struct rtnl_link_stats64	net_stats_prev;
+	struct tg3_ethtool_stats	estats;
+	struct tg3_ethtool_stats	estats_prev;
+
+	DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
+
+	union {
+		unsigned long		phy_crc_errors;
+		unsigned long		last_event_jiffies;
+	};
+
+	struct timer_list		timer;
+	u16				timer_counter;
+	u16				timer_multiplier;
+	u32				timer_offset;
+	u16				asf_counter;
+	u16				asf_multiplier;
+
+	/* 1 second counter for transient serdes link events */
+	u32				serdes_counter;
+#define SERDES_AN_TIMEOUT_5704S		2
+#define SERDES_PARALLEL_DET_TIMEOUT	1
+#define SERDES_AN_TIMEOUT_5714S		1
+
+	struct tg3_link_config		link_config;
+	struct tg3_bufmgr_config	bufmgr_config;
+
+	/* cache h/w values, often passed straight to h/w */
+	u32				rx_mode;
+	u32				tx_mode;
+	u32				mac_mode;
+	u32				mi_mode;
+	u32				misc_host_ctrl;
+	u32				grc_mode;
+	u32				grc_local_ctrl;
+	u32				dma_rwctrl;
+	u32				coalesce_mode;
+	u32				pwrmgmt_thresh;
+
+	/* PCI block */
+	u32				pci_chip_rev_id;
+	u16				pci_cmd;
+	u8				pci_cacheline_sz;
+	u8				pci_lat_timer;
+
+	int				pci_fn;
+	int				pm_cap;
+	int				msi_cap;
+	int				pcix_cap;
+	int				pcie_readrq;
+
+	struct mii_bus			*mdio_bus;
+	int				mdio_irq[PHY_MAX_ADDR];
+
+	u8				phy_addr;
+
+	/* PHY info */
+	u32				phy_id;
+#define TG3_PHY_ID_MASK			0xfffffff0
+#define TG3_PHY_ID_BCM5400		0x60008040
+#define TG3_PHY_ID_BCM5401		0x60008050
+#define TG3_PHY_ID_BCM5411		0x60008070
+#define TG3_PHY_ID_BCM5701		0x60008110
+#define TG3_PHY_ID_BCM5703		0x60008160
+#define TG3_PHY_ID_BCM5704		0x60008190
+#define TG3_PHY_ID_BCM5705		0x600081a0
+#define TG3_PHY_ID_BCM5750		0x60008180
+#define TG3_PHY_ID_BCM5752		0x60008100
+#define TG3_PHY_ID_BCM5714		0x60008340
+#define TG3_PHY_ID_BCM5780		0x60008350
+#define TG3_PHY_ID_BCM5755		0xbc050cc0
+#define TG3_PHY_ID_BCM5787		0xbc050ce0
+#define TG3_PHY_ID_BCM5756		0xbc050ed0
+#define TG3_PHY_ID_BCM5784		0xbc050fa0
+#define TG3_PHY_ID_BCM5761		0xbc050fd0
+#define TG3_PHY_ID_BCM5718C		0x5c0d8a00
+#define TG3_PHY_ID_BCM5718S		0xbc050ff0
+#define TG3_PHY_ID_BCM57765		0x5c0d8a40
+#define TG3_PHY_ID_BCM5719C		0x5c0d8a20
+#define TG3_PHY_ID_BCM5720C		0x5c0d8b60
+#define TG3_PHY_ID_BCM5906		0xdc00ac40
+#define TG3_PHY_ID_BCM8002		0x60010140
+#define TG3_PHY_ID_INVALID		0xffffffff
+
+#define PHY_ID_RTL8211C			0x001cc910
+#define PHY_ID_RTL8201E			0x00008200
+
+#define TG3_PHY_ID_REV_MASK		0x0000000f
+#define TG3_PHY_REV_BCM5401_B0		0x1
+
+	/* This macro assumes the passed PHY ID is
+	 * already masked with TG3_PHY_ID_MASK.
+	 */
+#define TG3_KNOWN_PHY_ID(X)		\
+	((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \
+	 (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \
+	 (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \
+	 (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \
+	 (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \
+	 (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \
+	 (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
+	 (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
+	 (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
+	 (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM5719C || \
+	 (X) == TG3_PHY_ID_BCM8002)
+
+	u32				phy_flags;
+#define TG3_PHYFLG_IS_LOW_POWER		0x00000001
+#define TG3_PHYFLG_IS_CONNECTED		0x00000002
+#define TG3_PHYFLG_USE_MI_INTERRUPT	0x00000004
+#define TG3_PHYFLG_PHY_SERDES		0x00000010
+#define TG3_PHYFLG_MII_SERDES		0x00000020
+#define TG3_PHYFLG_ANY_SERDES		(TG3_PHYFLG_PHY_SERDES |	\
+					TG3_PHYFLG_MII_SERDES)
+#define TG3_PHYFLG_IS_FET		0x00000040
+#define TG3_PHYFLG_10_100_ONLY		0x00000080
+#define TG3_PHYFLG_ENABLE_APD		0x00000100
+#define TG3_PHYFLG_CAPACITIVE_COUPLING	0x00000200
+#define TG3_PHYFLG_NO_ETH_WIRE_SPEED	0x00000400
+#define TG3_PHYFLG_JITTER_BUG		0x00000800
+#define TG3_PHYFLG_ADJUST_TRIM		0x00001000
+#define TG3_PHYFLG_ADC_BUG		0x00002000
+#define TG3_PHYFLG_5704_A0_BUG		0x00004000
+#define TG3_PHYFLG_BER_BUG		0x00008000
+#define TG3_PHYFLG_SERDES_PREEMPHASIS	0x00010000
+#define TG3_PHYFLG_PARALLEL_DETECT	0x00020000
+#define TG3_PHYFLG_EEE_CAP		0x00040000
+
+	u32				led_ctrl;
+	u32				phy_otp;
+	u32				setlpicnt;
+
+#define TG3_BPN_SIZE			24
+	char				board_part_number[TG3_BPN_SIZE];
+#define TG3_VER_SIZE			ETHTOOL_FWVERS_LEN
+	char				fw_ver[TG3_VER_SIZE];
+	u32				nic_sram_data_cfg;
+	u32				pci_clock_ctrl;
+	struct pci_dev			*pdev_peer;
+
+	struct tg3_hw_stats		*hw_stats;
+	dma_addr_t			stats_mapping;
+	struct work_struct		reset_task;
+
+	int				nvram_lock_cnt;
+	u32				nvram_size;
+#define TG3_NVRAM_SIZE_2KB		0x00000800
+#define TG3_NVRAM_SIZE_64KB		0x00010000
+#define TG3_NVRAM_SIZE_128KB		0x00020000
+#define TG3_NVRAM_SIZE_256KB		0x00040000
+#define TG3_NVRAM_SIZE_512KB		0x00080000
+#define TG3_NVRAM_SIZE_1MB		0x00100000
+#define TG3_NVRAM_SIZE_2MB		0x00200000
+
+	u32				nvram_pagesize;
+	u32				nvram_jedecnum;
+
+#define JEDEC_ATMEL			0x1f
+#define JEDEC_ST			0x20
+#define JEDEC_SAIFUN			0x4f
+#define JEDEC_SST			0xbf
+
+#define ATMEL_AT24C02_CHIP_SIZE		TG3_NVRAM_SIZE_2KB
+#define ATMEL_AT24C02_PAGE_SIZE		(8)
+
+#define ATMEL_AT24C64_CHIP_SIZE		TG3_NVRAM_SIZE_64KB
+#define ATMEL_AT24C64_PAGE_SIZE		(32)
+
+#define ATMEL_AT24C512_CHIP_SIZE	TG3_NVRAM_SIZE_512KB
+#define ATMEL_AT24C512_PAGE_SIZE	(128)
+
+#define ATMEL_AT45DB0X1B_PAGE_POS	9
+#define ATMEL_AT45DB0X1B_PAGE_SIZE	264
+
+#define ATMEL_AT25F512_PAGE_SIZE	256
+
+#define ST_M45PEX0_PAGE_SIZE		256
+
+#define SAIFUN_SA25F0XX_PAGE_SIZE	256
+
+#define SST_25VF0X0_PAGE_SIZE		4098
+
+	unsigned int			irq_max;
+	unsigned int			irq_cnt;
+
+	struct ethtool_coalesce		coal;
+
+	/* firmware info */
+	const char			*fw_needed;
+	const struct firmware		*fw;
+	u32				fw_len; /* includes BSS */
+};
+
+#endif /* !(_T3_H) */