New 7.0 FW: bnx2x, cnic, bnx2i, bnx2fc

New FW/HSI (7.0):
 - Added support for the 578xx chips
 - Improved HSI - much less direct driver access to the FW internal
   memory is needed.

New implementation of the HSI handling layer in the bnx2x (bnx2x_sp.c):
 - Introduced chip-dependent objects that have chip-independent interfaces
   for the configuration of MACs, multicast addresses, Rx mode, the
   indirection table, fast path queues and function initialization/cleanup.
 - Object functionality is based on private function pointers, which allows
   not only per-chip but also PF/VF differentiation, while still preserving
   the same interface towards the driver (see the sketch below).
 - The objects' interfaces are not influenced by HSI changes that do not
   require new parameters, keeping the code outside bnx2x_sp.c invariant
   with regard to such HSI changes.
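
A minimal sketch of this pattern (the names sample_mac_obj, e1x_config_mac,
e2_config_mac and sample_mac_obj_init are hypothetical, standing in for the
actual bnx2x_sp.c objects): a chip-independent interface whose per-chip
implementation is bound once through a private function pointer:

	struct sample_mac_obj {
		/* chip-independent interface used by the rest of the driver */
		int (*config_mac)(struct sample_mac_obj *o,
				  const unsigned char *mac, int add);
	};

	static int e1x_config_mac(struct sample_mac_obj *o,
				  const unsigned char *mac, int add)
	{
		/* a 57710/57711 flavour would build its ramrod data here */
		return 0;
	}

	static int e2_config_mac(struct sample_mac_obj *o,
				 const unsigned char *mac, int add)
	{
		/* a 57712-and-newer (or VF) flavour would go here */
		return 0;
	}

	static void sample_mac_obj_init(struct sample_mac_obj *o, int is_e1x)
	{
		/* bind the chip-specific implementation once; callers
		 * never branch on the chip type again */
		o->config_mac = is_e1x ? e1x_config_mac : e2_config_mac;
	}

An HSI change that only alters how a command is built stays inside one such
implementation, so code outside bnx2x_sp.c is untouched.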

Changes in the CNIC, bnx2fc and bnx2i modules due to the new HSI.

Signed-off-by: Vladislav Zolotarov <vladz@broadcom.com>
Signed-off-by: Michael Chan <mchan@broadcom.com>
Signed-off-by: Bhanu Prakash Gollapudi <bprakash@broadcom.com>
Signed-off-by: Eilon Greenstein <eilong@broadcom.com>
Signed-off-by: David S. Miller <davem@conan.davemloft.net>
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index c72e1df..ebd8b1c 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -27,7 +27,7 @@
 #include "bnx2x_init.h"
 #include "bnx2x_sp.h"
 
-static int bnx2x_setup_irqs(struct bnx2x *bp);
+
 
 /**
  * bnx2x_bz_fp - zero content of the fastpath structure.
@@ -72,6 +72,8 @@
 	to_fp->napi = orig_napi;
 }
 
+int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
+
 /* free skb in the packet ring at pos idx
  * return idx of last bd freed
  */
@@ -88,8 +90,8 @@
 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
 	prefetch(&skb->end);
 
-	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
-	   idx, tx_buf, skb);
+	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
+	   fp->index, idx, tx_buf, skb);
 
 	/* unmap first bd */
 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
@@ -97,6 +99,7 @@
 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
+
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
 #ifdef BNX2X_STOP_ON_ERROR
 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
@@ -175,6 +178,9 @@
 	 * memory barrier, there is a small possibility that
 	 * start_xmit() will miss it and cause the queue to be stopped
 	 * forever.
+	 * On the other hand we need an rmb() here to ensure the proper
+	 * ordering of bit testing in the following
+	 * netif_tx_queue_stopped(txq) call.
 	 */
 	smp_mb();
 
@@ -226,7 +232,7 @@
 
 	/* First mark all used pages */
 	for (i = 0; i < sge_len; i++)
-		SGE_MASK_CLEAR_BIT(fp,
+		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
 			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
 
 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
@@ -238,8 +244,8 @@
 		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 
 	last_max = RX_SGE(fp->last_max_sge);
-	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
-	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
+	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
+	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
 
 	/* If ring is not full */
 	if (last_elem + 1 != first_elem)
@@ -250,8 +256,8 @@
 		if (likely(fp->sge_mask[i]))
 			break;
 
-		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
-		delta += RX_SGE_MASK_ELEM_SZ;
+		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
+		delta += BIT_VEC64_ELEM_SZ;
 	}
 
 	if (delta > 0) {
@@ -266,33 +272,56 @@
 }
 
 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
-			    struct sk_buff *skb, u16 cons, u16 prod)
+			    struct sk_buff *skb, u16 cons, u16 prod,
+			    struct eth_fast_path_rx_cqe *cqe)
 {
 	struct bnx2x *bp = fp->bp;
 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 	dma_addr_t mapping;
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
 
-	/* move empty skb from pool to prod and map it */
-	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
-				 fp->rx_buf_size, DMA_FROM_DEVICE);
-	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
-
-	/* move partial skb from cons to pool (don't unmap yet) */
-	fp->tpa_pool[queue] = *cons_rx_buf;
-
-	/* mark bin state as start - print error if current state != stop */
-	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
+	/* print error if current state != stop */
+	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
 
-	fp->tpa_state[queue] = BNX2X_TPA_START;
+	/* Try to map an empty skb from the aggregation info */
+	mapping = dma_map_single(&bp->pdev->dev,
+				 first_buf->skb->data,
+				 fp->rx_buf_size, DMA_FROM_DEVICE);
+	/*
+	 *  ...if it fails - move the skb from the consumer to the producer
+	 *  and set the current aggregation state as ERROR to drop it
+	 *  when TPA_STOP arrives.
+	 */
 
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		/* Move the BD from the consumer to the producer */
+		bnx2x_reuse_rx_skb(fp, cons, prod);
+		tpa_info->tpa_state = BNX2X_TPA_ERROR;
+		return;
+	}
+
+	/* move empty skb from pool to prod */
+	prod_rx_buf->skb = first_buf->skb;
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 	/* point prod_bd to new skb */
 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 
+	/* move partial skb from cons to pool (don't unmap yet) */
+	*first_buf = *cons_rx_buf;
+
+	/* mark bin state as START */
+	tpa_info->parsing_flags =
+		le16_to_cpu(cqe->pars_flags.flags);
+	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
+	tpa_info->tpa_state = BNX2X_TPA_START;
+	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
+	tpa_info->placement_offset = cqe->placement_offset;
+
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
 #ifdef _ASM_GENERIC_INT_L64_H
@@ -323,10 +352,17 @@
 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
 				    u16 len_on_bd)
 {
-	/* TPA arrgregation won't have an IP options and TCP options
-	 * other than timestamp.
+	/*
+	 * TPA aggregation won't have either IP options or TCP options
+	 * other than timestamp or IPv6 extension headers.
 	 */
-	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
+	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
+
+	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+	    PRS_FLAG_OVERETH_IPV6)
+		hdrs_len += sizeof(struct ipv6hdr);
+	else /* IPv4 */
+		hdrs_len += sizeof(struct iphdr);
 
 
 	/* Check if there was a TCP timestamp, if there is it's will
@@ -341,30 +377,30 @@
 }
 
 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			       struct sk_buff *skb,
-			       struct eth_fast_path_rx_cqe *fp_cqe,
-			       u16 cqe_idx, u16 parsing_flags)
+			       u16 queue, struct sk_buff *skb,
+			       struct eth_end_agg_rx_cqe *cqe,
+			       u16 cqe_idx)
 {
 	struct sw_rx_page *rx_pg, old_rx_pg;
-	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
 	u32 i, frag_len, frag_size, pages;
 	int err;
 	int j;
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	u16 len_on_bd = tpa_info->len_on_bd;
 
-	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
+	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
 	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
 
 	/* This is needed in order to enable forwarding support */
 	if (frag_size)
-		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
-							      len_on_bd);
+		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
+					tpa_info->parsing_flags, len_on_bd);
 
 #ifdef BNX2X_STOP_ON_ERROR
 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
-		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
-			  fp_cqe->pkt_len, len_on_bd);
+		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
 		bnx2x_panic();
 		return -EINVAL;
 	}
@@ -372,8 +408,7 @@
 
 	/* Run through the SGL and compose the fragmented skb */
 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
-		u16 sge_idx =
-			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
+		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
 
 		/* FW gives the indices of the SGE as if the ring is an array
 		   (meaning that "next" element will consume 2 indices) */
@@ -408,13 +443,28 @@
 }
 
 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
+			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
 			   u16 cqe_idx)
 {
-	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
+	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
+	u8 pad = tpa_info->placement_offset;
+	u16 len = tpa_info->len_on_bd;
 	struct sk_buff *skb = rx_buf->skb;
 	/* alloc new skb */
-	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+	struct sk_buff *new_skb;
+	u8 old_tpa_state = tpa_info->tpa_state;
+
+	tpa_info->tpa_state = BNX2X_TPA_STOP;
+
+	/* If there was an error during the handling of the TPA_START -
+	 * drop this aggregation.
+	 */
+	if (old_tpa_state == BNX2X_TPA_ERROR)
+		goto drop;
+
+	/* Try to allocate the new skb */
+	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
 
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
@@ -423,11 +473,6 @@
 			 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
-		/* fix ip xsum and give it to the stack */
-		/* (no need to map the new skb) */
-		u16 parsing_flags =
-			le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
-
 		prefetch(skb);
 		prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
@@ -447,21 +492,9 @@
 		skb->protocol = eth_type_trans(skb, bp->dev);
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-		{
-			struct iphdr *iph;
-
-			iph = (struct iphdr *)skb->data;
-			iph->check = 0;
-			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
-		}
-
-		if (!bnx2x_fill_frag_skb(bp, fp, skb,
-					 &cqe->fast_path_cqe, cqe_idx,
-					 parsing_flags)) {
-			if (parsing_flags & PARSING_FLAGS_VLAN)
-				__vlan_hwaccel_put_tag(skb,
-						 le16_to_cpu(cqe->fast_path_cqe.
-							     vlan_tag));
+		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
+				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
 			napi_gro_receive(&fp->napi, skb);
 		} else {
 			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
@@ -471,16 +504,16 @@
 
 
 		/* put new skb in bin */
-		fp->tpa_pool[queue].skb = new_skb;
+		rx_buf->skb = new_skb;
 
-	} else {
-		/* else drop the packet and keep the buffer in the bin */
-		DP(NETIF_MSG_RX_STATUS,
-		   "Failed to allocate new skb - dropping packet!\n");
-		fp->eth_q_stats.rx_skb_alloc_failed++;
+		return;
 	}
 
-	fp->tpa_state[queue] = BNX2X_TPA_STOP;
+drop:
+	/* drop the packet and keep the buffer in the bin */
+	DP(NETIF_MSG_RX_STATUS,
+	   "Failed to allocate or map a new skb - dropping packet!\n");
+	fp->eth_q_stats.rx_skb_alloc_failed++;
 }
 
 /* Set Toeplitz hash value in the skb using the value from the
@@ -534,9 +567,16 @@
 		struct sw_rx_bd *rx_buf = NULL;
 		struct sk_buff *skb;
 		union eth_rx_cqe *cqe;
+		struct eth_fast_path_rx_cqe *cqe_fp;
 		u8 cqe_fp_flags;
+		enum eth_rx_cqe_type cqe_fp_type;
 		u16 len, pad;
 
+#ifdef BNX2X_STOP_ON_ERROR
+		if (unlikely(bp->panic))
+			return 0;
+#endif
+
 		comp_ring_cons = RCQ_BD(sw_comp_cons);
 		bd_prod = RX_BD(bd_prod);
 		bd_cons = RX_BD(bd_cons);
@@ -549,17 +589,18 @@
 				  PAGE_SIZE + 1));
 
 		cqe = &fp->rx_comp_ring[comp_ring_cons];
-		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
+		cqe_fp = &cqe->fast_path_cqe;
+		cqe_fp_flags = cqe_fp->type_error_flags;
+		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
 
 		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
 		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
-		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
-		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
-		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
-		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));
+		   cqe_fp_flags, cqe_fp->status_flags,
+		   le32_to_cpu(cqe_fp->rss_hash_result),
+		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
 
 		/* is this a slowpath msg? */
-		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
+		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
 			bnx2x_sp_event(fp, cqe);
 			goto next_cqe;
 
@@ -568,61 +609,59 @@
 			rx_buf = &fp->rx_buf_ring[bd_cons];
 			skb = rx_buf->skb;
 			prefetch(skb);
-			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
-			pad = cqe->fast_path_cqe.placement_offset;
 
-			/* - If CQE is marked both TPA_START and TPA_END it is
-			 *   a non-TPA CQE.
-			 * - FP CQE will always have either TPA_START or/and
-			 *   TPA_STOP flags set.
-			 */
-			if ((!fp->disable_tpa) &&
-			    (TPA_TYPE(cqe_fp_flags) !=
-					(TPA_TYPE_START | TPA_TYPE_END))) {
-				u16 queue = cqe->fast_path_cqe.queue_index;
+			if (!CQE_TYPE_FAST(cqe_fp_type)) {
+#ifdef BNX2X_STOP_ON_ERROR
+				/* sanity check */
+				if (fp->disable_tpa &&
+				    (CQE_TYPE_START(cqe_fp_type) ||
+				     CQE_TYPE_STOP(cqe_fp_type)))
+					BNX2X_ERR("START/STOP packet while "
+						  "disable_tpa type %x\n",
+						  CQE_TYPE(cqe_fp_type));
+#endif
 
-				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
+				if (CQE_TYPE_START(cqe_fp_type)) {
+					u16 queue = cqe_fp->queue_index;
 					DP(NETIF_MSG_RX_STATUS,
 					   "calling tpa_start on queue %d\n",
 					   queue);
 
 					bnx2x_tpa_start(fp, queue, skb,
-							bd_cons, bd_prod);
+							bd_cons, bd_prod,
+							cqe_fp);
 
-					/* Set Toeplitz hash for an LRO skb */
+					/* Set Toeplitz hash for LRO skb */
 					bnx2x_set_skb_rxhash(bp, cqe, skb);
 
 					goto next_rx;
-				} else { /* TPA_STOP */
+
+				} else {
+					u16 queue =
+						cqe->end_agg_cqe.queue_index;
 					DP(NETIF_MSG_RX_STATUS,
 					   "calling tpa_stop on queue %d\n",
 					   queue);
 
-					if (!BNX2X_RX_SUM_FIX(cqe))
-						BNX2X_ERR("STOP on none TCP "
-							  "data\n");
-
-					/* This is a size of the linear data
-					   on this skb */
-					len = le16_to_cpu(cqe->fast_path_cqe.
-								len_on_bd);
-					bnx2x_tpa_stop(bp, fp, queue, pad,
-						    len, cqe, comp_ring_cons);
+					bnx2x_tpa_stop(bp, fp, queue,
+						       &cqe->end_agg_cqe,
+						       comp_ring_cons);
 #ifdef BNX2X_STOP_ON_ERROR
 					if (bp->panic)
 						return 0;
 #endif
 
-					bnx2x_update_sge_prod(fp,
-							&cqe->fast_path_cqe);
+					bnx2x_update_sge_prod(fp, cqe_fp);
 					goto next_cqe;
 				}
 			}
-
+			/* non TPA */
+			len = le16_to_cpu(cqe_fp->pkt_len);
+			pad = cqe_fp->placement_offset;
 			dma_sync_single_for_device(&bp->pdev->dev,
 					dma_unmap_addr(rx_buf, mapping),
-						   pad + RX_COPY_THRESH,
-						   DMA_FROM_DEVICE);
+						       pad + RX_COPY_THRESH,
+						       DMA_FROM_DEVICE);
 			prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 
 			/* is this an error packet? */
@@ -641,8 +680,7 @@
 			    (len <= RX_COPY_THRESH)) {
 				struct sk_buff *new_skb;
 
-				new_skb = netdev_alloc_skb(bp->dev,
-							   len + pad);
+				new_skb = netdev_alloc_skb(bp->dev, len + pad);
 				if (new_skb == NULL) {
 					DP(NETIF_MSG_RX_ERR,
 					   "ERROR  packet dropped "
@@ -688,6 +726,7 @@
 			skb_checksum_none_assert(skb);
 
 			if (bp->dev->features & NETIF_F_RXCSUM) {
+
 				if (likely(BNX2X_RX_CSUM_OK(cqe)))
 					skb->ip_summed = CHECKSUM_UNNECESSARY;
 				else
@@ -697,10 +736,10 @@
 
 		skb_record_rx_queue(skb, fp->index);
 
-		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
-		     PARSING_FLAGS_VLAN)
+		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
+		    PARSING_FLAGS_VLAN)
 			__vlan_hwaccel_put_tag(skb,
-				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+					       le16_to_cpu(cqe_fp->vlan_tag));
 		napi_gro_receive(&fp->napi, skb);
 
 
@@ -926,7 +965,7 @@
 {
 	int func = BP_FUNC(bp);
 	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
-					      ETH_MAX_AGGREGATION_QUEUES_E1H;
+					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
 	u16 ring_prod;
 	int i, j;
 
@@ -938,11 +977,16 @@
 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
 
 		if (!fp->disable_tpa) {
-			/* Fill the per-aggregation pool */
+			/* Fill the per-aggregation pool */
 			for (i = 0; i < max_agg_queues; i++) {
-				fp->tpa_pool[i].skb =
-				   netdev_alloc_skb(bp->dev, fp->rx_buf_size);
-				if (!fp->tpa_pool[i].skb) {
+				struct bnx2x_agg_info *tpa_info =
+					&fp->tpa_info[i];
+				struct sw_rx_bd *first_buf =
+					&tpa_info->first_buf;
+
+				first_buf->skb = netdev_alloc_skb(bp->dev,
+						       fp->rx_buf_size);
+				if (!first_buf->skb) {
 					BNX2X_ERR("Failed to allocate TPA "
 						  "skb pool for queue[%d] - "
 						  "disabling TPA on this "
@@ -951,10 +995,8 @@
 					fp->disable_tpa = 1;
 					break;
 				}
-				dma_unmap_addr_set((struct sw_rx_bd *)
-							&bp->fp->tpa_pool[i],
-						   mapping, 0);
-				fp->tpa_state[i] = BNX2X_TPA_STOP;
+				dma_unmap_addr_set(first_buf, mapping, 0);
+				tpa_info->tpa_state = BNX2X_TPA_STOP;
 			}
 
 			/* "next page" elements initialization */
@@ -970,13 +1012,13 @@
 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
 					BNX2X_ERR("was only able to allocate "
 						  "%d rx sges\n", i);
-					BNX2X_ERR("disabling TPA for"
-						  " queue[%d]\n", j);
+					BNX2X_ERR("disabling TPA for "
+						  "queue[%d]\n", j);
 					/* Cleanup already allocated elements */
-					bnx2x_free_rx_sge_range(bp,
-								fp, ring_prod);
-					bnx2x_free_tpa_pool(bp,
-							    fp, max_agg_queues);
+					bnx2x_free_rx_sge_range(bp, fp,
+								ring_prod);
+					bnx2x_free_tpa_pool(bp, fp,
+							    max_agg_queues);
 					fp->disable_tpa = 1;
 					ring_prod = 0;
 					break;
@@ -1004,7 +1046,7 @@
 		if (j != 0)
 			continue;
 
-		if (!CHIP_IS_E2(bp)) {
+		if (CHIP_IS_E1(bp)) {
 			REG_WR(bp, BAR_USTRORM_INTMEM +
 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
 			       U64_LO(fp->rx_comp_mapping));
@@ -1048,7 +1090,6 @@
 
 		if (skb == NULL)
 			continue;
-
 		dma_unmap_single(&bp->pdev->dev,
 				 dma_unmap_addr(rx_buf, mapping),
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
@@ -1070,7 +1111,7 @@
 		if (!fp->disable_tpa)
 			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
 					    ETH_MAX_AGGREGATION_QUEUES_E1 :
-					    ETH_MAX_AGGREGATION_QUEUES_E1H);
+					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
 	}
 }
 
@@ -1234,7 +1275,6 @@
 		}
 
 		offset++;
-		fp->state = BNX2X_FP_STATE_IRQ;
 	}
 
 	i = BNX2X_NUM_ETH_QUEUES(bp);
@@ -1274,25 +1314,46 @@
 
 	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
 			 bp->dev->name, bp->dev);
-	if (!rc)
-		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
-
 	return rc;
 }
 
-static void bnx2x_napi_enable(struct bnx2x *bp)
+static inline int bnx2x_setup_irqs(struct bnx2x *bp)
+{
+	int rc = 0;
+	if (bp->flags & USING_MSIX_FLAG) {
+		rc = bnx2x_req_msix_irqs(bp);
+		if (rc)
+			return rc;
+	} else {
+		bnx2x_ack_int(bp);
+		rc = bnx2x_req_irq(bp);
+		if (rc) {
+			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
+			return rc;
+		}
+		if (bp->flags & USING_MSI_FLAG) {
+			bp->dev->irq = bp->pdev->irq;
+			netdev_info(bp->dev, "using MSI  IRQ %d\n",
+			       bp->pdev->irq);
+		}
+	}
+
+	return 0;
+}
+
+static inline void bnx2x_napi_enable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		napi_enable(&bnx2x_fp(bp, i, napi));
 }
 
-static void bnx2x_napi_disable(struct bnx2x *bp)
+static inline void bnx2x_napi_disable(struct bnx2x *bp)
 {
 	int i;
 
-	for_each_napi_queue(bp, i)
+	for_each_rx_queue(bp, i)
 		napi_disable(&bnx2x_fp(bp, i, napi));
 }
 
@@ -1310,7 +1371,6 @@
 {
 	bnx2x_int_disable_sync(bp, disable_hw);
 	bnx2x_napi_disable(bp);
-	netif_tx_disable(bp->dev);
 }
 
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -1361,26 +1421,6 @@
 	bp->num_queues += NONE_ETH_CONTEXT_USE;
 }
 
-#ifdef BCM_CNIC
-static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
-{
-	if (!NO_FCOE(bp)) {
-		if (!IS_MF_SD(bp))
-			bnx2x_set_fip_eth_mac_addr(bp, 1);
-		bnx2x_set_all_enode_macs(bp, 1);
-		bp->flags |= FCOE_MACS_SET;
-	}
-}
-#endif
-
-static void bnx2x_release_firmware(struct bnx2x *bp)
-{
-	kfree(bp->init_ops_offsets);
-	kfree(bp->init_ops);
-	kfree(bp->init_data);
-	release_firmware(bp->firmware);
-}
-
 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
 {
 	int rc, num = bp->num_queues;
@@ -1412,27 +1452,198 @@
 			 */
 			fp->rx_buf_size =
 				BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
-				BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 		else
 			fp->rx_buf_size =
-				bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
-				IP_HEADER_ALIGNMENT_PADDING;
+				bp->dev->mtu + ETH_OVREHEAD +
+				BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
 	}
 }
 
+static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
+{
+	int i;
+	u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
+	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
+
+	/*
+	 * Prepare the initial contents of the indirection table if RSS is
+	 * enabled.
+	 */
+	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+		for (i = 0; i < sizeof(ind_table); i++)
+			ind_table[i] =
+				bp->fp->cl_id +	(i % num_eth_queues);
+	}
+
+	/*
+	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
+	 * per-port, so if explicit configuration is needed, do it only
+	 * for a PMF.
+	 *
+	 * For 57712 and newer on the other hand it's a per-function
+	 * configuration.
+	 */
+	return bnx2x_config_rss_pf(bp, ind_table,
+				   bp->port.pmf || !CHIP_IS_E1x(bp));
+}
+
+int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
+{
+	struct bnx2x_config_rss_params params = {0};
+	int i;
+
+	/* Although RSS is meaningless when there is a single HW queue we
+	 * still need it enabled in order to have HW Rx hash generated.
+	 *
+	 * if (!is_eth_multi(bp))
+	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
+	 */
+
+	params.rss_obj = &bp->rss_conf_obj;
+
+	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
+
+	/* RSS mode */
+	switch (bp->multi_mode) {
+	case ETH_RSS_MODE_DISABLED:
+		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_REGULAR:
+		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_VLAN_PRI:
+		__set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_E1HOV_PRI:
+		__set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
+		break;
+	case ETH_RSS_MODE_IP_DSCP:
+		__set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
+		break;
+	default:
+		BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
+		return -EINVAL;
+	}
+
+	/* If RSS is enabled */
+	if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
+		/* RSS configuration */
+		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
+		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
+
+		/* Hash bits */
+		params.rss_result_mask = MULTI_MASK;
+
+		memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
+
+		if (config_hash) {
+			/* RSS keys */
+			for (i = 0; i < sizeof(params.rss_key) / 4; i++)
+				params.rss_key[i] = random32();
+
+			__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
+		}
+	}
+
+	return bnx2x_config_rss(bp, &params);
+}
+
+static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
+{
+	struct bnx2x_func_state_params func_params = {0};
+
+	/* Prepare parameters for function state transitions */
+	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
+
+	func_params.f_obj = &bp->func_obj;
+	func_params.cmd = BNX2X_F_CMD_HW_INIT;
+
+	func_params.params.hw_init.load_phase = load_code;
+
+	return bnx2x_func_state_change(bp, &func_params);
+}
+
+/*
+ * Cleans the objects that have internal lists, without sending
+ * ramrods. Should be run when interrupts are disabled.
+ */
+static void bnx2x_squeeze_objects(struct bnx2x *bp)
+{
+	int rc;
+	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
+	struct bnx2x_mcast_ramrod_params rparam = {0};
+	struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
+
+	/***************** Cleanup MACs' object first *************************/
+
+	/* Wait for completion of the requested commands */
+	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+	/* Perform a dry cleanup */
+	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
+
+	/* Clean ETH primary MAC */
+	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
+
+	/* Cleanup UC list */
+	vlan_mac_flags = 0;
+	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
+	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
+				 &ramrod_flags);
+	if (rc != 0)
+		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
+
+	/***************** Now clean mcast object *****************************/
+	rparam.mcast_obj = &bp->mcast_obj;
+	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
+
+	/* Add a DEL command... */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
+	if (rc < 0)
+		BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
+			  "object: %d\n", rc);
+
+	/* ...and wait until all pending commands are cleared */
+	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	while (rc != 0) {
+		if (rc < 0) {
+			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
+				  rc);
+			return;
+		}
+
+		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
+	}
+}
+
+#ifndef BNX2X_STOP_ON_ERROR
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		goto label; \
+	} while (0)
+#else
+#define LOAD_ERROR_EXIT(bp, label) \
+	do { \
+		(bp)->state = BNX2X_STATE_ERROR; \
+		(bp)->panic = 1; \
+		return -EBUSY; \
+	} while (0)
+#endif
+
 /* must be called with rtnl_lock */
 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
 {
+	int port = BP_PORT(bp);
 	u32 load_code;
 	int i, rc;
 
-	/* Set init arrays */
-	rc = bnx2x_init_firmware(bp);
-	if (rc) {
-		BNX2X_ERR("Error loading firmware\n");
-		return rc;
-	}
-
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return -EPERM;
@@ -1459,6 +1670,10 @@
 	/* Set the receive queues buffer size */
 	bnx2x_set_rx_buf_size(bp);
 
+	/*
+	 * Set the TPA flag for each queue. The TPA flag determines the minimal
+	 * queue size, so it must be set prior to queue memory allocation.
+	 */
 	for_each_queue(bp, i)
 		bnx2x_fp(bp, i, disable_tpa) =
 					((bp->flags & TPA_ENABLE_FLAG) == 0);
@@ -1478,31 +1693,30 @@
 	rc = bnx2x_set_real_num_queues(bp);
 	if (rc) {
 		BNX2X_ERR("Unable to set real_num_queues\n");
-		goto load_error0;
+		LOAD_ERROR_EXIT(bp, load_error0);
 	}
 
 	bnx2x_napi_enable(bp);
 
 	/* Send LOAD_REQUEST command to MCP
-	   Returns the type of LOAD command:
-	   if it is the first port to be initialized
-	   common blocks should be initialized, otherwise - not
-	*/
+	 * Returns the type of LOAD command:
+	 * if it is the first port to be initialized
+	 * common blocks should be initialized, otherwise - not
+	 */
 	if (!BP_NOMCP(bp)) {
 		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_error1;
+			LOAD_ERROR_EXIT(bp, load_error1);
 		}
 		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
 			rc = -EBUSY; /* other port in diagnostic mode */
-			goto load_error1;
+			LOAD_ERROR_EXIT(bp, load_error1);
 		}
 
 	} else {
 		int path = BP_PATH(bp);
-		int port = BP_PORT(bp);
 
 		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
 		   path, load_count[path][0], load_count[path][1],
@@ -1528,30 +1742,45 @@
 		bp->port.pmf = 0;
 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
 
+	/* Init Function state controlling object */
+	bnx2x__init_func_obj(bp);
+
 	/* Initialize HW */
 	rc = bnx2x_init_hw(bp, load_code);
 	if (rc) {
 		BNX2X_ERR("HW init failed, aborting\n");
 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
-		goto load_error2;
+		LOAD_ERROR_EXIT(bp, load_error2);
 	}
 
 	/* Connect to IRQs */
 	rc = bnx2x_setup_irqs(bp);
 	if (rc) {
 		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
-		goto load_error2;
+		LOAD_ERROR_EXIT(bp, load_error2);
 	}
 
 	/* Setup NIC internals and enable interrupts */
 	bnx2x_nic_init(bp, load_code);
 
+	/* Init per-function objects */
+	bnx2x_init_bp_objs(bp);
+
 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
-	    (bp->common.shmem2_base))
-		SHMEM2_WR(bp, dcc_support,
-			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
-			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+	    (bp->common.shmem2_base)) {
+		if (SHMEM2_HAS(bp, dcc_support))
+			SHMEM2_WR(bp, dcc_support,
+				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
+				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
+	}
+
+	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
+	rc = bnx2x_func_start(bp);
+	if (rc) {
+		BNX2X_ERR("Function start failed!\n");
+		LOAD_ERROR_EXIT(bp, load_error3);
+	}
 
 	/* Send LOAD_DONE command to MCP */
 	if (!BP_NOMCP(bp)) {
@@ -1559,74 +1788,38 @@
 		if (!load_code) {
 			BNX2X_ERR("MCP response failure, aborting\n");
 			rc = -EBUSY;
-			goto load_error3;
+			LOAD_ERROR_EXIT(bp, load_error3);
 		}
 	}
 
-	bnx2x_dcbx_init(bp);
-
-	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
-
-	rc = bnx2x_func_start(bp);
-	if (rc) {
-		BNX2X_ERR("Function start failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
-		goto load_error3;
-#else
-		bp->panic = 1;
-		return -EBUSY;
-#endif
-	}
-
-	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
+	rc = bnx2x_setup_leading(bp);
 	if (rc) {
 		BNX2X_ERR("Setup leading failed!\n");
-#ifndef BNX2X_STOP_ON_ERROR
-		goto load_error3;
-#else
-		bp->panic = 1;
-		return -EBUSY;
-#endif
-	}
-
-	if (!CHIP_IS_E1(bp) &&
-	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
-		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
-		bp->flags |= MF_FUNC_DIS;
+		LOAD_ERROR_EXIT(bp, load_error3);
 	}
 
 #ifdef BCM_CNIC
 	/* Enable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
 #endif
 
 	for_each_nondefault_queue(bp, i) {
-		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
+		rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
 		if (rc)
-#ifdef BCM_CNIC
-			goto load_error4;
-#else
-			goto load_error3;
-#endif
+			LOAD_ERROR_EXIT(bp, load_error4);
 	}
 
+	rc = bnx2x_init_rss_pf(bp);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error4);
+
 	/* Now when Clients are configured we are ready to work */
 	bp->state = BNX2X_STATE_OPEN;
 
-#ifdef BCM_CNIC
-	bnx2x_set_fcoe_eth_macs(bp);
-#endif
-
-	bnx2x_set_eth_mac(bp, 1);
-
-	/* Clear MC configuration */
-	if (CHIP_IS_E1(bp))
-		bnx2x_invalidate_e1_mc_list(bp);
-	else
-		bnx2x_invalidate_e1h_mc_list(bp);
-
-	/* Clear UC lists configuration */
-	bnx2x_invalidate_uc_list(bp);
+	/* Configure a ucast MAC */
+	rc = bnx2x_set_eth_mac(bp, true);
+	if (rc)
+		LOAD_ERROR_EXIT(bp, load_error4);
 
 	if (bp->pending_max) {
 		bnx2x_update_max_mf_config(bp, bp->pending_max);
@@ -1636,15 +1829,18 @@
 	if (bp->port.pmf)
 		bnx2x_initial_phy_init(bp, load_mode);
 
-	/* Initialize Rx filtering */
-	bnx2x_set_rx_mode(bp->dev);
-
 	/* Start fast path */
+
+	/* Initialize Rx filter. */
+	netif_addr_lock_bh(bp->dev);
+	bnx2x_set_rx_mode(bp->dev);
+	netif_addr_unlock_bh(bp->dev);
+
+	/* Start the Tx */
 	switch (load_mode) {
 	case LOAD_NORMAL:
 		/* Tx queue should be only reenabled */
 		netif_tx_wake_all_queues(bp->dev);
-		/* Initialize the receive filter. */
 		break;
 
 	case LOAD_OPEN:
@@ -1673,18 +1869,28 @@
 #endif
 	bnx2x_inc_load_cnt(bp);
 
-	bnx2x_release_firmware(bp);
+	/* Wait for all pending SP commands to complete */
+	if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
+		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
+		bnx2x_nic_unload(bp, UNLOAD_CLOSE);
+		return -EBUSY;
+	}
 
+	bnx2x_dcbx_init(bp);
 	return 0;
 
-#ifdef BCM_CNIC
+#ifndef BNX2X_STOP_ON_ERROR
 load_error4:
+#ifdef BCM_CNIC
 	/* Disable Timer scan */
-	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
+	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
 #endif
 load_error3:
 	bnx2x_int_disable_sync(bp, 1);
 
+	/* Clean queueable objects */
+	bnx2x_squeeze_objects(bp);
+
 	/* Free SKBs, SGEs, TPA pool and driver internals */
 	bnx2x_free_skbs(bp);
 	for_each_rx_queue(bp, i)
@@ -1704,9 +1910,8 @@
 load_error0:
 	bnx2x_free_mem(bp);
 
-	bnx2x_release_firmware(bp);
-
 	return rc;
+#endif /* ! BNX2X_STOP_ON_ERROR */
 }
 
 /* must be called with rtnl_lock */
@@ -1728,18 +1933,19 @@
 	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
 #endif
 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
+	smp_mb();
 
-	/* Set "drop all" */
 	bp->rx_mode = BNX2X_RX_MODE_NONE;
-	bnx2x_set_storm_rx_mode(bp);
 
 	/* Stop Tx */
 	bnx2x_tx_disable(bp);
 
 	del_timer_sync(&bp->timer);
 
-	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
-		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
+	/* Set ALWAYS_ALIVE bit in shmem */
+	bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
+
+	bnx2x_drv_pulse(bp);
 
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
@@ -1754,6 +1960,12 @@
 		bnx2x_free_irq(bp);
 	}
 
+	/*
+	 * At this stage no more interrupts will arrive, so we may safely clean
+	 * the queueable objects here in case they failed to get cleaned so far.
+	 */
+	bnx2x_squeeze_objects(bp);
+
 	bp->port.pmf = 0;
 
 	/* Free SKBs, SGEs, TPA pool and driver internals */
@@ -2154,7 +2366,6 @@
 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
 	struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
 {
-
 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
 
 	if (xmit_type & XMIT_CSUM_V4)
@@ -2166,7 +2377,6 @@
 
 	if (!(xmit_type & XMIT_CSUM_TCP))
 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
-
 }
 
 /**
@@ -2234,7 +2444,7 @@
 	struct bnx2x_fastpath *fp;
 	struct netdev_queue *txq;
 	struct sw_tx_bd *tx_buf;
-	struct eth_tx_start_bd *tx_start_bd;
+	struct eth_tx_start_bd *tx_start_bd, *first_bd;
 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
@@ -2296,7 +2506,15 @@
 		}
 	}
 #endif
-
+	/* Map skb linear data for DMA */
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+		DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
+		   "silently dropping this SKB\n");
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
 	/*
 	Please read carefully. First we use one BD which we mark as start,
 	then we have a parsing info BD (used for TSO or xsum),
@@ -2306,12 +2524,19 @@
 	And above all, all pdb sizes are in words - NOT DWORDS!
 	*/
 
-	pkt_prod = fp->tx_pkt_prod++;
+	/* get the current packet producer now - advance it just before sending
+	 * the packet, since mapping of pages may fail and cause it to be dropped
+	 */
+	pkt_prod = fp->tx_pkt_prod;
 	bd_prod = TX_BD(fp->tx_bd_prod);
 
-	/* get a tx_buf and first BD */
+	/* get a tx_buf and first BD
+	 * tx_start_bd may be changed during SPLIT,
+	 * but first_bd will always stay first
+	 */
 	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
 	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
+	first_bd = tx_start_bd;
 
 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
 	SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
@@ -2343,7 +2568,7 @@
 	if (xmit_type & XMIT_CSUM)
 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
 
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 		/* Set PBD in checksum offload case */
@@ -2351,6 +2576,20 @@
 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
 						     &pbd_e2_parsing_data,
 						     xmit_type);
+		if (IS_MF_SI(bp)) {
+			/*
+			 * fill in the MAC addresses in the PBD - for local
+			 * switching
+			 */
+			bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
+					      &pbd_e2->src_mac_addr_mid,
+					      &pbd_e2->src_mac_addr_lo,
+					      eth->h_source);
+			bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
+					      &pbd_e2->dst_mac_addr_mid,
+					      &pbd_e2->dst_mac_addr_lo,
+					      eth->h_dest);
+		}
 	} else {
 		pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2360,15 +2599,10 @@
 
 	}
 
-	/* Map skb linear data for DMA */
-	mapping = dma_map_single(&bp->pdev->dev, skb->data,
-				 skb_headlen(skb), DMA_TO_DEVICE);
-
 	/* Setup the data pointer of the first BD of the packet */
 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
-	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
-	tx_start_bd->nbd = cpu_to_le16(nbd);
+	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
 	pkt_size = tx_start_bd->nbytes;
 
@@ -2391,7 +2625,7 @@
 		if (unlikely(skb_headlen(skb) > hlen))
 			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
 						 hlen, bd_prod, ++nbd);
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
 					     xmit_type);
 		else
@@ -2410,19 +2644,34 @@
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
+		mapping = dma_map_page(&bp->pdev->dev, frag->page,
+				       frag->page_offset, frag->size,
+				       DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+
+			DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
+						"dropping packet...\n");
+
+			/* we need to unmap all buffers already mapped
+			 * for this SKB;
+			 * first_bd->nbd needs to be properly updated
+			 * before the call to bnx2x_free_tx_pkt
+			 */
+			first_bd->nbd = cpu_to_le16(nbd);
+			bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod));
+			return NETDEV_TX_OK;
+		}
+
 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 		if (total_pkt_bd == NULL)
 			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
 
-		mapping = dma_map_page(&bp->pdev->dev, frag->page,
-				       frag->page_offset,
-				       frag->size, DMA_TO_DEVICE);
-
 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
 		tx_data_bd->nbytes = cpu_to_le16(frag->size);
 		le16_add_cpu(&pkt_size, frag->size);
+		nbd++;
 
 		DP(NETIF_MSG_TX_QUEUED,
 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
@@ -2432,6 +2681,9 @@
 
 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
 
+	/* update with actual num BDs */
+	first_bd->nbd = cpu_to_le16(nbd);
+
 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
 
 	/* now send a tx doorbell, counting the next BD
@@ -2440,6 +2692,13 @@
 	if (TX_BD_POFF(bd_prod) < nbd)
 		nbd++;
 
+	/* total_pkt_bytes should be set on the first data BD if
+	 * it's not an LSO packet and there is more than one
+	 * data BD. In this case pkt_size is limited by an MTU value.
+	 * However we prefer to set it for an LSO packet (while we don't
+	 * have to) in order to save some CPU cycles in the non-LSO
+	 * case, where we care about them much more.
+	 */
 	if (total_pkt_bd != NULL)
 		total_pkt_bd->total_pkt_bytes = pkt_size;
 
@@ -2460,6 +2719,7 @@
 		   pbd_e2->parsing_data);
 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
 
+	fp->tx_pkt_prod++;
 	/*
 	 * Make sure that the BD data is updated before updating the producer
 	 * since FW might read the BD right after the producer is updated.
@@ -2500,15 +2760,23 @@
 {
 	struct sockaddr *addr = p;
 	struct bnx2x *bp = netdev_priv(dev);
+	int rc = 0;
 
 	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
 		return -EINVAL;
 
-	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
-	if (netif_running(dev))
-		bnx2x_set_eth_mac(bp, 1);
+	if (netif_running(dev))  {
+		rc = bnx2x_set_eth_mac(bp, false);
+		if (rc)
+			return rc;
+	}
 
-	return 0;
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	if (netif_running(dev))
+		rc = bnx2x_set_eth_mac(bp, true);
+
+	return rc;
 }
 
 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
@@ -2525,7 +2793,7 @@
 	} else {
 #endif
 		/* status blocks */
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_FREE(sb->e2_sb,
 				       bnx2x_fp(bp, fp_index,
 						status_blk_mapping),
@@ -2581,7 +2849,7 @@
 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
 {
 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
-	if (CHIP_IS_E2(bp)) {
+	if (!CHIP_IS_E1x(bp)) {
 		bnx2x_fp(bp, index, sb_index_values) =
 			(__le16 *)status_blk.e2_sb->sb.index_values;
 		bnx2x_fp(bp, index, sb_running_index) =
@@ -2618,7 +2886,7 @@
 	if (!IS_FCOE_IDX(index)) {
 #endif
 		/* status blocks */
-		if (CHIP_IS_E2(bp))
+		if (!CHIP_IS_E1x(bp))
 			BNX2X_PCI_ALLOC(sb->e2_sb,
 				&bnx2x_fp(bp, index, status_blk_mapping),
 				sizeof(struct host_hc_status_block_e2));
@@ -2747,30 +3015,6 @@
 	return 0;
 }
 
-static int bnx2x_setup_irqs(struct bnx2x *bp)
-{
-	int rc = 0;
-	if (bp->flags & USING_MSIX_FLAG) {
-		rc = bnx2x_req_msix_irqs(bp);
-		if (rc)
-			return rc;
-	} else {
-		bnx2x_ack_int(bp);
-		rc = bnx2x_req_irq(bp);
-		if (rc) {
-			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
-			return rc;
-		}
-		if (bp->flags & USING_MSI_FLAG) {
-			bp->dev->irq = bp->pdev->irq;
-			netdev_info(bp->dev, "using MSI  IRQ %d\n",
-			       bp->pdev->irq);
-		}
-	}
-
-	return 0;
-}
-
 void bnx2x_free_mem_bp(struct bnx2x *bp)
 {
 	kfree(bp->fp);
@@ -3021,3 +3265,57 @@
 
 	return rc;
 }
+
+
+void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
+			      u32 cid)
+{
+	/* ustorm cxt validation */
+	cxt->ustorm_ag_context.cdu_usage =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
+	/* xcontext validation */
+	cxt->xstorm_ag_context.cdu_reserved =
+		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
+			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
+}
+
+static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
+					     u8 fw_sb_id, u8 sb_index,
+					     u8 ticks)
+{
+
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
+	REG_WR8(bp, addr, ticks);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
+			  port, fw_sb_id, sb_index, ticks);
+}
+
+static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
+					     u16 fw_sb_id, u8 sb_index,
+					     u8 disable)
+{
+	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
+	u32 addr = BAR_CSTRORM_INTMEM +
+		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
+	u16 flags = REG_RD16(bp, addr);
+	/* clear and set */
+	flags &= ~HC_INDEX_DATA_HC_ENABLED;
+	flags |= enable_flag;
+	REG_WR16(bp, addr, flags);
+	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
+			  port, fw_sb_id, sb_index, disable);
+}
+
+void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
+				    u8 sb_index, u8 disable, u16 usec)
+{
+	int port = BP_PORT(bp);
+	u8 ticks = usec / BNX2X_BTR;
+
+	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
+
+	disable = disable ? 1 : (usec ? 0 : 1);
+	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
+}