/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2011 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
#include "bnx2x_cmn.h"
#include "bnx2x_init.h"
#include "bnx2x_sp.h"


/**
 * bnx2x_bz_fp - zero content of the fastpath structure.
 *
 * @bp:		driver handle
 * @index:	fastpath index to be zeroed
 *
 * Makes sure the contents of bp->fp[index].napi are kept
 * intact.
 */
static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	struct napi_struct orig_napi = fp->napi;
	/* bzero bnx2x_fastpath contents */
	memset(fp, 0, sizeof(*fp));

	/* Restore the NAPI object as it has been already initialized */
	fp->napi = orig_napi;

	fp->bp = bp;
	fp->index = index;
	if (IS_ETH_FP(fp))
		fp->max_cos = bp->max_cos;
	else
		/* Special queues support only one CoS */
		fp->max_cos = 1;

	/*
	 * set the TPA flag for each queue. The TPA flag determines the queue's
	 * minimum size, so it must be set prior to queue memory allocation
	 */
	fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on an FCoE L2 ring */
	if (IS_FCOE_FP(fp))
		fp->disable_tpa = 1;
#endif
}

/**
 * bnx2x_move_fp - move content of the fastpath structure.
 *
 * @bp:		driver handle
 * @from:	source FP index
 * @to:		destination FP index
 *
 * Makes sure the contents of bp->fp[to].napi are kept
 * intact.
 */
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
	struct bnx2x_fastpath *from_fp = &bp->fp[from];
	struct bnx2x_fastpath *to_fp = &bp->fp[to];
	struct napi_struct orig_napi = to_fp->napi;
	/* Move bnx2x_fastpath contents */
	memcpy(to_fp, from_fp, sizeof(*to_fp));
	to_fp->index = to;

	/* Restore the NAPI object as it has been already initialized */
	to_fp->napi = orig_napi;
}

int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
	   txdata->txq_index, idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);


	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

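/* bnx2x_tx_int - process Tx completions of a single Tx queue.
 *
 * Walks the completion ring from the last software consumer up to the
 * hardware consumer taken from the status block, frees every completed
 * packet and re-wakes the netdev Tx queue if it was stopped and enough
 * descriptors have become available again.
 */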
int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
	sw_cons = txdata->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
				      " pkt_cons %u\n",
		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
		sw_cons++;
	}

	txdata->tx_pkt_cons = sw_cons;
	txdata->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

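/* Remember the highest SGE index seen so far; SUB_S16() handles 16-bit
 * index wrap-around.
 */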
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

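/* bnx2x_update_sge_prod - return the SGE pages of an aggregation to the ring.
 *
 * Clears the mask bit of every SGE referenced by the CQE, then advances
 * fp->rx_sge_prod past all fully consumed 64-bit mask elements so the
 * hardware can reuse those SGE entries.
 */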
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
		delta += BIT_VEC64_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

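/* bnx2x_tpa_start - open a TPA aggregation bin.
 *
 * Parks the skb that received the first segment in the aggregation bin and
 * maps a fresh skb from the bin onto the Rx BD at the producer index.  If
 * the DMA mapping fails, the BD is simply reused and the bin is marked
 * BNX2X_TPA_ERROR so the aggregation gets dropped when TPA_STOP arrives.
 */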
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod,
			    struct eth_fast_path_rx_cqe *cqe)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *first_buf = &tpa_info->first_buf;

	/* print error if current state != stop */
	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	/* Try to map an empty skb from the aggregation info */
	mapping = dma_map_single(&bp->pdev->dev,
				 first_buf->skb->data,
				 fp->rx_buf_size, DMA_FROM_DEVICE);
	/*
	 * ...if it fails - move the skb from the consumer to the producer
	 * and set the current aggregation state as ERROR to drop it
	 * when TPA_STOP arrives.
	 */

	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		/* Move the BD from the consumer to the producer */
		bnx2x_reuse_rx_skb(fp, cons, prod);
		tpa_info->tpa_state = BNX2X_TPA_ERROR;
		return;
	}

	/* move empty skb from pool to prod */
	prod_rx_buf->skb = first_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	/* move partial skb from cons to pool (don't unmap yet) */
	*first_buf = *cons_rx_buf;

	/* mark bin state as START */
	tpa_info->parsing_flags =
		le16_to_cpu(cqe->pars_flags.flags);
	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	tpa_info->tpa_state = BNX2X_TPA_START;
	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
	tpa_info->placement_offset = cqe->placement_offset;

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

/* Timestamp option length allowed for TPA aggregation:
 *
 *		nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
 *
 * @bp:			driver handle
 * @parsing_flags:	parsing flags from the START CQE
 * @len_on_bd:		total length of the first packet for the
 *			aggregation.
 *
 * Approximate value of the MSS for this aggregation calculated using
 * the first packet of it.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/*
	 * TPA aggregation won't have either IP options or TCP options
	 * other than timestamp or IPv6 extension headers.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);

	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
	    PRS_FLAG_OVERETH_IPV6)
		hdrs_len += sizeof(struct ipv6hdr);
	else /* IPv4 */
		hdrs_len += sizeof(struct iphdr);


	/* Check if there was a TCP timestamp; if there is one it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise FW would close the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}

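/* bnx2x_fill_frag_skb - attach the SGE pages of an aggregation to the skb.
 *
 * Walks the SGL reported in the END_AGG CQE, replaces each page in the SGE
 * ring with a newly allocated one, unmaps the old page and adds it to the
 * skb as a page fragment.  gso_size is set from the estimated MSS so the
 * aggregated packet can be re-segmented if it is forwarded.
 */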
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       u16 queue, struct sk_buff *skb,
			       struct eth_end_agg_rx_cqe *cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	u16 len_on_bd = tpa_info->len_on_bd;

	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
					tpa_info->parsing_flags, len_on_bd);

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

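/* bnx2x_tpa_stop - close a TPA aggregation bin and pass the skb to the stack.
 *
 * Allocates a replacement skb for the bin, completes the aggregated skb
 * with its SGE page fragments and hands it to GRO.  If the replacement skb
 * cannot be allocated, or the bin was already marked as failed at
 * TPA_START time, the packet is dropped and the buffer stays in the bin.
 */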
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, struct eth_end_agg_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
	u8 pad = tpa_info->placement_offset;
	u16 len = tpa_info->len_on_bd;
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb;
	u8 old_tpa_state = tpa_info->tpa_state;

	tpa_info->tpa_state = BNX2X_TPA_STOP;

	/* If there was an error during the handling of the TPA_START -
	 * drop this aggregation.
	 */
	if (old_tpa_state == BNX2X_TPA_ERROR)
		goto drop;

	/* Try to allocate the new skb */
	new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 fp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > fp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, fp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
				__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb_any(skb);
		}


		/* put new skb in bin */
		rx_buf->skb = new_skb;

		return;
	}

drop:
	/* drop the packet and keep the buffer in the bin */
	DP(NETIF_MSG_RX_STATUS,
	   "Failed to allocate or map a new skb - dropping packet!\n");
	fp->eth_q_stats.rx_skb_alloc_failed++;
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}

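/* bnx2x_rx_int - main Rx polling routine, called from the NAPI handler.
 *
 * Processes up to @budget completions from the Rx completion queue:
 * slow-path CQEs are forwarded to bnx2x_sp_event(), TPA start/stop CQEs
 * open or close aggregations, and regular CQEs hand the skb (or a copy,
 * for small packets on a jumbo MTU) to the stack.  Finally the updated BD,
 * CQE and SGE producers are written back to the chip.
 */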
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		struct eth_fast_path_rx_cqe *cqe_fp;
		u8 cqe_fp_flags;
		enum eth_rx_cqe_type cqe_fp_type;
		u16 len, pad;

#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic))
			return 0;
#endif

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp = &cqe->fast_path_cqe;
		cqe_fp_flags = cqe_fp->type_error_flags;
		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe_fp->status_flags,
		   le32_to_cpu(cqe_fp->rss_hash_result),
		   le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);

			if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
				/* sanity check */
				if (fp->disable_tpa &&
				    (CQE_TYPE_START(cqe_fp_type) ||
				     CQE_TYPE_STOP(cqe_fp_type)))
					BNX2X_ERR("START/STOP packet while "
						  "disable_tpa type %x\n",
						  CQE_TYPE(cqe_fp_type));
#endif

				if (CQE_TYPE_START(cqe_fp_type)) {
					u16 queue = cqe_fp->queue_index;
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod,
							cqe_fp);

					/* Set Toeplitz hash for LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;

				} else {
					u16 queue =
						cqe->end_agg_cqe.queue_index;
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					bnx2x_tpa_stop(bp, fp, queue,
						       &cqe->end_agg_cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp, cqe_fp);
					goto next_cqe;
				}
			}
			/* non TPA */
			len = le16_to_cpu(cqe_fp->pkt_len);
			pad = cqe_fp->placement_offset;
			dma_sync_single_for_cpu(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev, len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 fp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->dev->features & NETIF_F_RXCSUM) {

				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
		    PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
					       le16_to_cpu(cqe_fp->vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

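/* MSI-X fastpath interrupt handler: acknowledge the status block with
 * interrupts left disabled and schedule NAPI, which does the actual Rx/Tx
 * processing.
 */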
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	u8 cos;

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);

	for_each_cos_in_tx_queue(fp, cos)
		prefetch(fp->txdata[cos].tx_cons_sb);

	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = bnx2x_extract_max_cfg(bp,
						   bp->mf_config[BP_VN(bp)]);

		/* Calculate the current MAX line speed limit for the MF
		 * devices
		 */
		if (IS_MF_SI(bp))
			line_speed = (line_speed * maxCfg) / 100;
		else { /* SD mode */
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		}
	}

	return line_speed;
}

/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
static inline void bnx2x_fill_report_data(struct bnx2x *bp,
					  struct bnx2x_link_report_data *data)
{
	u16 line_speed = bnx2x_get_mf_speed(bp);

	memset(data, 0, sizeof(*data));

	/* Fill the report data: effective line speed */
	data->line_speed = line_speed;

	/* Link is down */
	if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
		__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
			  &data->link_report_flags);

	/* Full DUPLEX */
	if (bp->link_vars.duplex == DUPLEX_FULL)
		__set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);

	/* Rx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
		__set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);

	/* Tx Flow Control is ON */
	if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
		__set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
}

/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls the __bnx2x_link_report() under the same locking scheme
 * as a link/PHY state managing code to ensure a consistent link
 * reporting.
 */

void bnx2x_link_report(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);
	__bnx2x_link_report(bp);
	bnx2x_release_phy_lock(bp);
}

/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
void __bnx2x_link_report(struct bnx2x *bp)
{
	struct bnx2x_link_report_data cur_data;

	/* reread mf_cfg */
	if (!CHIP_IS_E1(bp))
		bnx2x_read_mf_cfg(bp);

	/* Read the current link report info */
	bnx2x_fill_report_data(bp, &cur_data);

	/* Don't report link down or exactly the same link status twice */
	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &bp->last_reported_link.link_report_flags) &&
	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		      &cur_data.link_report_flags)))
		return;

	bp->link_cnt++;

	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
		     &cur_data.link_report_flags)) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	} else {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");
		pr_cont("%d Mbps ", cur_data.line_speed);

		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
				       &cur_data.link_report_flags))
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		/* Handle the FC at the end so that only these flags would be
		 * possibly set. This way we may easily check if there is no FC
		 * enabled.
		 */
		if (cur_data.link_report_flags) {
			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
				     &cur_data.link_report_flags)) {
				pr_cont(", receive ");
				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
					     &cur_data.link_report_flags))
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	}
}

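/* bnx2x_init_rx_rings - prepare the Rx rings of all queues for traffic.
 *
 * For every Rx queue with TPA enabled, pre-allocate the per-aggregation
 * skb pool and the SGE pages; then publish the initial BD/CQE/SGE
 * producers to the chip.
 */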
void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
	u16 ring_prod;
	int i, j;

	/* Allocate TPA resources */
	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		DP(NETIF_MSG_IFUP,
		   "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);

		if (!fp->disable_tpa) {
			/* Fill the per-aggregation pool */
			for (i = 0; i < max_agg_queues; i++) {
				struct bnx2x_agg_info *tpa_info =
					&fp->tpa_info[i];
				struct sw_rx_bd *first_buf =
					&tpa_info->first_buf;

				first_buf->skb = netdev_alloc_skb(bp->dev,
						       fp->rx_buf_size);
				if (!first_buf->skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set(first_buf, mapping, 0);
				tpa_info->tpa_state = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for "
						  "queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp, fp,
								ring_prod);
					bnx2x_free_tpa_pool(bp, fp,
							    max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		/* Activate BD ring */
		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		if (CHIP_IS_E1(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

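/* Release any skbs still left on the Tx rings of every Tx queue and CoS. */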
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;
	u8 cos;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];

			u16 bd_cons = txdata->tx_bd_cons;
			u16 sw_prod = txdata->tx_pkt_prod;
			u16 sw_cons = txdata->tx_pkt_cons;

			while (sw_cons != sw_prod) {
				bd_cons = bnx2x_free_tx_pkt(bp, txdata,
							    TX_BD(sw_cons));
				sw_cons++;
			}
		}
	}
}

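/* Unmap and free every skb currently attached to a queue's Rx BD ring. */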
static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int i;

	/* ring wasn't allocated */
	if (fp->rx_buf_ring == NULL)
		return;

	for (i = 0; i < NUM_RX_BD; i++) {
		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL)
			continue;
		dma_unmap_single(&bp->pdev->dev,
				 dma_unmap_addr(rx_buf, mapping),
				 fp->rx_buf_size, DMA_FROM_DEVICE);

		rx_buf->skb = NULL;
		dev_kfree_skb(skb);
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		bnx2x_free_rx_bds(fp);

		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
{
	/* load old values */
	u32 mf_cfg = bp->mf_config[BP_VN(bp)];

	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
		/* leave all but MAX value */
		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;

		/* set new MAX value */
		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
				& FUNC_MF_CFG_MAX_BW_MASK;

		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
	}
}

/**
 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
 *
 * @bp:		driver handle
 * @nvecs:	number of vectors to be released
 */
static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
{
	int i, offset = 0;

	if (nvecs == offset)
		return;
	free_irq(bp->msix_table[offset].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[offset].vector);
	offset++;
#ifdef BCM_CNIC
	if (nvecs == offset)
		return;
	offset++;
#endif

	for_each_eth_queue(bp, i) {
		if (nvecs == offset)
			return;
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
		   "irq\n", i, bp->msix_table[offset].vector);

		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
				     CNIC_PRESENT + 1);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}

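/* bnx2x_enable_msix - request MSI-X vectors from the PCI layer.
 *
 * Vector layout: entry 0 is the slowpath interrupt, followed by one entry
 * for CNIC when it is compiled in and one entry per ETH fastpath queue.
 * If fewer vectors are granted than requested, the number of queues is
 * reduced to match; if MSI-X cannot be enabled at all, an error is
 * returned so the caller can fall back to MSI or INTx.
 */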
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	/* We need separate vectors for ETH queues only (not FCoE) */
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_PRESENT + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
				  bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 0;

	rc = request_irq(bp->msix_table[offset++].vector,
			 bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
			      bp->msix_table[offset].vector, rc);
			bnx2x_free_msix_irqs(bp, offset);
			return -EBUSY;
		}

		offset++;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_PRESENT;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	return rc;
}

static inline int bnx2x_setup_irqs(struct bnx2x *bp)
{
	int rc = 0;
	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc)
			return rc;
	} else {
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
			return rc;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	return 0;
}

static inline void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static inline void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	if (netif_running(bp->dev)) {
		bnx2x_napi_enable(bp);
		bnx2x_int_enable(bp);
		if (bp->state == BNX2X_STATE_OPEN)
			netif_tx_wake_all_queues(bp->dev);
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
}

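/* Tx queue selection: steer FCoE/FIP frames to the dedicated FCoE ring
 * (when one is configured) and hash everything else over the regular ETH
 * Tx queues.
 */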
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001405u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1406{
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001407 struct bnx2x *bp = netdev_priv(dev);
Vladislav Zolotarovcdb9d6a2011-08-09 03:08:55 +00001408
Dmitry Kravkovfaa28312011-07-16 13:35:51 -07001409#ifdef BCM_CNIC
Vladislav Zolotarovcdb9d6a2011-08-09 03:08:55 +00001410 if (!NO_FCOE(bp)) {
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001411 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1412 u16 ether_type = ntohs(hdr->h_proto);
1413
1414 /* Skip VLAN tag if present */
1415 if (ether_type == ETH_P_8021Q) {
1416 struct vlan_ethhdr *vhdr =
1417 (struct vlan_ethhdr *)skb->data;
1418
1419 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1420 }
1421
1422 /* If ethertype is FCoE or FIP - use FCoE ring */
1423 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
Ariel Elior6383c0b2011-07-14 08:31:57 +00001424 return bnx2x_fcoe_tx(bp, txq_index);
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001425 }
1426#endif
Vladislav Zolotarovcdb9d6a2011-08-09 03:08:55 +00001427 /* select a non-FCoE queue */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001428 return __skb_tx_hash(dev, skb, BNX2X_NUM_ETH_QUEUES(bp));
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001429}
1430
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001431void bnx2x_set_num_queues(struct bnx2x *bp)
1432{
1433 switch (bp->multi_mode) {
1434 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001435 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001436 break;
1437 case ETH_RSS_MODE_REGULAR:
1438 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001439 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001440
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001441 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001442 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001443 break;
1444 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001445
1446 /* Add special queues */
Ariel Elior6383c0b2011-07-14 08:31:57 +00001447 bp->num_queues += NON_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001448}
1449
Vladislav Zolotarovcdb9d6a2011-08-09 03:08:55 +00001450/**
1451 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1452 *
 1453 * @bp:		driver handle
1454 *
 1455 * We currently support at most 16 Tx queues for each CoS, thus we will
1456 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1457 * bp->max_cos.
1458 *
1459 * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1460 * index after all ETH L2 indices.
1461 *
 1462 * If the actual number of Tx queues (for each CoS) is less than 16, then there
 1463 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
 1464 * 16..31, ...) with indices that are not coupled with any real Tx queue.
1465 *
1466 * The proper configuration of skb->queue_mapping is handled by
1467 * bnx2x_select_queue() and __skb_tx_hash().
1468 *
1469 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1470 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1471 */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001472static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1473{
Ariel Elior6383c0b2011-07-14 08:31:57 +00001474 int rc, tx, rx;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001475
Ariel Elior6383c0b2011-07-14 08:31:57 +00001476 tx = MAX_TXQS_PER_COS * bp->max_cos;
1477 rx = BNX2X_NUM_ETH_QUEUES(bp);
1478
1479/* account for fcoe queue */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001480#ifdef BCM_CNIC
Ariel Elior6383c0b2011-07-14 08:31:57 +00001481 if (!NO_FCOE(bp)) {
1482 rx += FCOE_PRESENT;
1483 tx += FCOE_PRESENT;
1484 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001485#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00001486
1487 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1488 if (rc) {
1489 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1490 return rc;
1491 }
1492 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1493 if (rc) {
1494 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1495 return rc;
1496 }
1497
1498 DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
1499 tx, rx);
1500
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001501 return rc;
1502}
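/*
 * Worked example (illustrative only; the constants are assumed, not quoted
 * from this file): with MAX_TXQS_PER_COS == 16, bp->max_cos == 2, 8 ETH L2
 * rings and an FCoE ring present, the code above reports
 *	tx = 16 * 2 + 1 = 33,	rx = 8 + 1 = 9
 * to the stack. CoS 0 then uses txq indices 0..7, CoS 1 uses 16..23, the
 * remaining indices in each group of 16 are holes, and the FCoE ring gets
 * the index right after the ETH block (32 here).
 */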
1503
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001504static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1505{
1506 int i;
1507
1508 for_each_queue(bp, i) {
1509 struct bnx2x_fastpath *fp = &bp->fp[i];
1510
1511 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1512 if (IS_FCOE_IDX(i))
1513 /*
1514 * Although there are no IP frames expected to arrive to
1515 * this ring we still want to add an
1516 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1517 * overrun attack.
1518 */
1519 fp->rx_buf_size =
1520 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001521 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001522 else
1523 fp->rx_buf_size =
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001524 bp->dev->mtu + ETH_OVREHEAD +
1525 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001526 }
1527}
1528
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001529static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1530{
1531 int i;
1532 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1533 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1534
1535 /*
 1536	 * Prepare the initial contents of the indirection table if RSS is
1537 * enabled
1538 */
1539 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1540 for (i = 0; i < sizeof(ind_table); i++)
1541 ind_table[i] =
1542 bp->fp->cl_id + (i % num_eth_queues);
1543 }
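	/*
	 * Illustration (hypothetical values): with bp->fp->cl_id == 17 and
	 * 4 ETH queues, the loop above fills the table round-robin:
	 *	ind_table[] = { 17, 18, 19, 20, 17, 18, 19, 20, ... }
	 * so the RSS hash buckets are spread evenly across the ETH client IDs.
	 */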
1544
1545 /*
1546 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
 1547	 * per-port, so if explicit configuration is needed, do it only
1548 * for a PMF.
1549 *
1550 * For 57712 and newer on the other hand it's a per-function
1551 * configuration.
1552 */
1553 return bnx2x_config_rss_pf(bp, ind_table,
1554 bp->port.pmf || !CHIP_IS_E1x(bp));
1555}
1556
1557int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1558{
1559 struct bnx2x_config_rss_params params = {0};
1560 int i;
1561
1562 /* Although RSS is meaningless when there is a single HW queue we
1563 * still need it enabled in order to have HW Rx hash generated.
1564 *
1565 * if (!is_eth_multi(bp))
1566 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1567 */
1568
1569 params.rss_obj = &bp->rss_conf_obj;
1570
1571 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1572
1573 /* RSS mode */
1574 switch (bp->multi_mode) {
1575 case ETH_RSS_MODE_DISABLED:
1576 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1577 break;
1578 case ETH_RSS_MODE_REGULAR:
1579 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1580 break;
1581 case ETH_RSS_MODE_VLAN_PRI:
1582 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1583 break;
1584 case ETH_RSS_MODE_E1HOV_PRI:
1585 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1586 break;
1587 case ETH_RSS_MODE_IP_DSCP:
1588 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1589 break;
1590 default:
1591 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1592 return -EINVAL;
1593 }
1594
1595 /* If RSS is enabled */
1596 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1597 /* RSS configuration */
1598 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1599 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1600 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1601 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1602
1603 /* Hash bits */
1604 params.rss_result_mask = MULTI_MASK;
1605
1606 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1607
1608 if (config_hash) {
1609 /* RSS keys */
1610 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1611 params.rss_key[i] = random32();
1612
1613 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1614 }
1615 }
1616
1617 return bnx2x_config_rss(bp, &params);
1618}
1619
1620static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1621{
1622 struct bnx2x_func_state_params func_params = {0};
1623
1624 /* Prepare parameters for function state transitions */
1625 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1626
1627 func_params.f_obj = &bp->func_obj;
1628 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1629
1630 func_params.params.hw_init.load_phase = load_code;
1631
1632 return bnx2x_func_state_change(bp, &func_params);
1633}
1634
1635/*
 1636 * Cleans the objects that have internal lists without sending
 1637 * ramrods. Should be run when interrupts are disabled.
1638 */
1639static void bnx2x_squeeze_objects(struct bnx2x *bp)
1640{
1641 int rc;
1642 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1643 struct bnx2x_mcast_ramrod_params rparam = {0};
1644 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1645
1646 /***************** Cleanup MACs' object first *************************/
1647
1648 /* Wait for completion of requested */
1649 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1650 /* Perform a dry cleanup */
1651 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1652
1653 /* Clean ETH primary MAC */
1654 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1655 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1656 &ramrod_flags);
1657 if (rc != 0)
1658 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1659
1660 /* Cleanup UC list */
1661 vlan_mac_flags = 0;
1662 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1663 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1664 &ramrod_flags);
1665 if (rc != 0)
1666 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1667
1668 /***************** Now clean mcast object *****************************/
1669 rparam.mcast_obj = &bp->mcast_obj;
1670 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1671
1672 /* Add a DEL command... */
1673 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1674 if (rc < 0)
1675 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1676 "object: %d\n", rc);
1677
1678 /* ...and wait until all pending commands are cleared */
1679 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1680 while (rc != 0) {
1681 if (rc < 0) {
1682 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1683 rc);
1684 return;
1685 }
1686
1687 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1688 }
1689}
1690
1691#ifndef BNX2X_STOP_ON_ERROR
1692#define LOAD_ERROR_EXIT(bp, label) \
1693 do { \
1694 (bp)->state = BNX2X_STATE_ERROR; \
1695 goto label; \
1696 } while (0)
1697#else
1698#define LOAD_ERROR_EXIT(bp, label) \
1699 do { \
1700 (bp)->state = BNX2X_STATE_ERROR; \
1701 (bp)->panic = 1; \
1702 return -EBUSY; \
1703 } while (0)
1704#endif
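/*
 * Usage sketch (illustrative): a failing step in bnx2x_nic_load() does
 *
 *	rc = bnx2x_set_real_num_queues(bp);
 *	if (rc)
 *		LOAD_ERROR_EXIT(bp, load_error0);
 *
 * On a production build this records BNX2X_STATE_ERROR and jumps to the
 * matching unwind label; with BNX2X_STOP_ON_ERROR it sets bp->panic and
 * returns -EBUSY instead.
 */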
1705
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001706/* must be called with rtnl_lock */
1707int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1708{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001709 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001710 u32 load_code;
1711 int i, rc;
1712
1713#ifdef BNX2X_STOP_ON_ERROR
1714 if (unlikely(bp->panic))
1715 return -EPERM;
1716#endif
1717
1718 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1719
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001720 /* Set the initial link reported state to link down */
1721 bnx2x_acquire_phy_lock(bp);
1722 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1723 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1724 &bp->last_reported_link.link_report_flags);
1725 bnx2x_release_phy_lock(bp);
1726
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001727 /* must be called before memory allocation and HW init */
1728 bnx2x_ilt_set_info(bp);
1729
Ariel Elior6383c0b2011-07-14 08:31:57 +00001730 /*
 1731	 * Zero fastpath structures while preserving invariants: the napi struct
 1732	 * (allocated only once), the fp index, max_cos and the bp pointer.
1733 * Also set fp->disable_tpa.
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001734 */
1735 for_each_queue(bp, i)
1736 bnx2x_bz_fp(bp, i);
1737
Ariel Elior6383c0b2011-07-14 08:31:57 +00001738
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001739 /* Set the receive queues buffer size */
1740 bnx2x_set_rx_buf_size(bp);
1741
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001742 if (bnx2x_alloc_mem(bp))
1743 return -ENOMEM;
1744
 1745	/* Since bnx2x_alloc_mem() may update bp->num_queues,
 1746	 * bnx2x_set_real_num_queues() must always
 1747	 * come after it.
1748 */
1749 rc = bnx2x_set_real_num_queues(bp);
1750 if (rc) {
1751 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001752 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001753 }
1754
Ariel Elior6383c0b2011-07-14 08:31:57 +00001755	/* Configure multi-CoS mappings in the kernel.
 1756	 * This configuration may be overridden by a multi-class queue discipline
 1757	 * or by a DCBX negotiation result.
1758 */
1759 bnx2x_setup_tc(bp->dev, bp->max_cos);
1760
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001761 bnx2x_napi_enable(bp);
1762
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001763 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001764 * Returns the type of LOAD command:
 1765	 * if it is the first port to be initialized,
 1766	 * common blocks should be initialized as well, otherwise not
1767 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001768 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001769 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001770 if (!load_code) {
1771 BNX2X_ERR("MCP response failure, aborting\n");
1772 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001773 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001774 }
1775 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1776 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001777 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001778 }
1779
1780 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001781 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001782
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001783 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1784 path, load_count[path][0], load_count[path][1],
1785 load_count[path][2]);
1786 load_count[path][0]++;
1787 load_count[path][1 + port]++;
1788 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1789 path, load_count[path][0], load_count[path][1],
1790 load_count[path][2]);
1791 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001792 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001793 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001794 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1795 else
1796 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1797 }
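	/*
	 * Illustration (assumed bring-up order, not from this file): if three
	 * functions on the same path come up in turn - two on port 0 and then
	 * one on port 1 - the counters become [1, 1, 0], [2, 2, 0], [3, 2, 1],
	 * so they are handed LOAD_COMMON, LOAD_FUNCTION and LOAD_PORT
	 * respectively.
	 */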
1798
1799 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001800 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001801 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001802 bp->port.pmf = 1;
Yaniv Rosner3deb8162011-06-14 01:34:33 +00001803 /*
1804 * We need the barrier to ensure the ordering between the
1805 * writing to bp->port.pmf here and reading it from the
1806 * bnx2x_periodic_task().
1807 */
1808 smp_mb();
1809 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
1810 } else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001811 bp->port.pmf = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00001812
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001813 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1814
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001815 /* Init Function state controlling object */
1816 bnx2x__init_func_obj(bp);
1817
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001818 /* Initialize HW */
1819 rc = bnx2x_init_hw(bp, load_code);
1820 if (rc) {
1821 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001822 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001823 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001824 }
1825
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001826 /* Connect to IRQs */
1827 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001828 if (rc) {
1829 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001830 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001831 }
1832
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001833 /* Setup NIC internals and enable interrupts */
1834 bnx2x_nic_init(bp, load_code);
1835
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001836 /* Init per-function objects */
1837 bnx2x_init_bp_objs(bp);
1838
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001839 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1840 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001841 (bp->common.shmem2_base)) {
1842 if (SHMEM2_HAS(bp, dcc_support))
1843 SHMEM2_WR(bp, dcc_support,
1844 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1845 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1846 }
1847
1848 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1849 rc = bnx2x_func_start(bp);
1850 if (rc) {
1851 BNX2X_ERR("Function start failed!\n");
Dmitry Kravkovc6363222011-07-19 01:38:53 +00001852 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001853 LOAD_ERROR_EXIT(bp, load_error3);
1854 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001855
1856 /* Send LOAD_DONE command to MCP */
1857 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001858 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001859 if (!load_code) {
1860 BNX2X_ERR("MCP response failure, aborting\n");
1861 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001862 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001863 }
1864 }
1865
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001866 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001867 if (rc) {
1868 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001869 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001870 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001871
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001872#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001873 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001874 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001875#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001876
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001877 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001878 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001879 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001880 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001881 }
1882
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001883 rc = bnx2x_init_rss_pf(bp);
1884 if (rc)
1885 LOAD_ERROR_EXIT(bp, load_error4);
1886
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001887 /* Now when Clients are configured we are ready to work */
1888 bp->state = BNX2X_STATE_OPEN;
1889
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001890 /* Configure a ucast MAC */
1891 rc = bnx2x_set_eth_mac(bp, true);
1892 if (rc)
1893 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001894
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001895 if (bp->pending_max) {
1896 bnx2x_update_max_mf_config(bp, bp->pending_max);
1897 bp->pending_max = 0;
1898 }
1899
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001900 if (bp->port.pmf)
1901 bnx2x_initial_phy_init(bp, load_mode);
1902
1903 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001904
1905 /* Initialize Rx filter. */
1906 netif_addr_lock_bh(bp->dev);
1907 bnx2x_set_rx_mode(bp->dev);
1908 netif_addr_unlock_bh(bp->dev);
1909
1910 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001911 switch (load_mode) {
1912 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001913 /* Tx queue should be only reenabled */
1914 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001915 break;
1916
1917 case LOAD_OPEN:
1918 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001919 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001920 break;
1921
1922 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001923 bp->state = BNX2X_STATE_DIAG;
1924 break;
1925
1926 default:
1927 break;
1928 }
1929
1930 if (!bp->port.pmf)
1931 bnx2x__link_status_update(bp);
1932
1933 /* start the timer */
1934 mod_timer(&bp->timer, jiffies + bp->current_interval);
1935
1936#ifdef BCM_CNIC
1937 bnx2x_setup_cnic_irq_info(bp);
1938 if (bp->state == BNX2X_STATE_OPEN)
1939 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1940#endif
1941 bnx2x_inc_load_cnt(bp);
1942
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001943 /* Wait for all pending SP commands to complete */
1944 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1945 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1946 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1947 return -EBUSY;
1948 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001949
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001950 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001951 return 0;
1952
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001953#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001954load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001955#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001956 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001957 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001958#endif
1959load_error3:
1960 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001961
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001962 /* Clean queueable objects */
1963 bnx2x_squeeze_objects(bp);
1964
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001965 /* Free SKBs, SGEs, TPA pool and driver internals */
1966 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001967 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001968 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001969
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001970 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001971 bnx2x_free_irq(bp);
1972load_error2:
1973 if (!BP_NOMCP(bp)) {
1974 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1975 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1976 }
1977
1978 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001979load_error1:
1980 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001981load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001982 bnx2x_free_mem(bp);
1983
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001984 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001985#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001986}
1987
1988/* must be called with rtnl_lock */
1989int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1990{
1991 int i;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001992 bool global = false;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001993
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00001994 if ((bp->state == BNX2X_STATE_CLOSED) ||
1995 (bp->state == BNX2X_STATE_ERROR)) {
1996 /* We can get here if the driver has been unloaded
1997 * during parity error recovery and is either waiting for a
1998 * leader to complete or for other functions to unload and
1999 * then ifdown has been issued. In this case we want to
 2000		 * unload and let other functions complete a recovery
2001 * process.
2002 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002003 bp->recovery_state = BNX2X_RECOVERY_DONE;
2004 bp->is_leader = 0;
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002005 bnx2x_release_leader_lock(bp);
2006 smp_mb();
2007
2008 DP(NETIF_MSG_HW, "Releasing a leadership...\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002009
2010 return -EINVAL;
2011 }
2012
Vladislav Zolotarov87b7ba32011-08-02 01:35:43 -07002013 /*
 2014	 * It's important to set the bp->state to a value different from
2015 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2016 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2017 */
2018 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2019 smp_mb();
2020
Vladislav Zolotarov9505ee32011-07-19 01:39:41 +00002021 /* Stop Tx */
2022 bnx2x_tx_disable(bp);
2023
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002024#ifdef BCM_CNIC
2025 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2026#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002027
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002028 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002029
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002030 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002031
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002032 /* Set ALWAYS_ALIVE bit in shmem */
2033 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2034
2035 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002036
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002037 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002038
2039 /* Cleanup the chip if needed */
2040 if (unload_mode != UNLOAD_RECOVERY)
2041 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002042 else {
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002043 /* Send the UNLOAD_REQUEST to the MCP */
2044 bnx2x_send_unload_req(bp, unload_mode);
2045
2046 /*
2047 * Prevent transactions to host from the functions on the
2048 * engine that doesn't reset global blocks in case of global
 2049		 * attention once global blocks are reset and gates are opened
 2050		 * (the engine whose leader will perform the recovery
2051 * last).
2052 */
2053 if (!CHIP_IS_E1x(bp))
2054 bnx2x_pf_disable(bp);
2055
2056 /* Disable HW interrupts, NAPI */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002057 bnx2x_netif_stop(bp, 1);
2058
2059 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002060 bnx2x_free_irq(bp);
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002061
2062 /* Report UNLOAD_DONE to MCP */
2063 bnx2x_send_unload_done(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002064 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002065
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002066 /*
 2067	 * At this stage no more interrupts will arrive, so we may safely clean
2068 * the queueable objects here in case they failed to get cleaned so far.
2069 */
2070 bnx2x_squeeze_objects(bp);
2071
Vladislav Zolotarov79616892011-07-21 07:58:54 +00002072 /* There should be no more pending SP commands at this stage */
2073 bp->sp_state = 0;
2074
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002075 bp->port.pmf = 0;
2076
2077 /* Free SKBs, SGEs, TPA pool and driver internals */
2078 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002079 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002080 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002081
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002082 bnx2x_free_mem(bp);
2083
2084 bp->state = BNX2X_STATE_CLOSED;
2085
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002086 /* Check if there are pending parity attentions. If there are - set
2087 * RECOVERY_IN_PROGRESS.
2088 */
2089 if (bnx2x_chk_parity_attn(bp, &global, false)) {
2090 bnx2x_set_reset_in_progress(bp);
2091
2092 /* Set RESET_IS_GLOBAL if needed */
2093 if (global)
2094 bnx2x_set_reset_global(bp);
2095 }
2096
2097
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002098 /* The last driver must disable a "close the gate" if there is no
2099 * parity attention or "process kill" pending.
2100 */
Vladislav Zolotarovc9ee9202011-06-14 01:33:51 +00002101 if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002102 bnx2x_disable_close_the_gate(bp);
2103
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002104 return 0;
2105}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002106
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002107int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
2108{
2109 u16 pmcsr;
2110
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00002111 /* If there is no power capability, silently succeed */
2112 if (!bp->pm_cap) {
2113 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2114 return 0;
2115 }
2116
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002117 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2118
2119 switch (state) {
2120 case PCI_D0:
2121 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2122 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2123 PCI_PM_CTRL_PME_STATUS));
2124
2125 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2126 /* delay required during transition out of D3hot */
2127 msleep(20);
2128 break;
2129
2130 case PCI_D3hot:
 2131		/* If there are other clients above, don't
 2132		   shut down the power */
2133 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2134 return 0;
2135 /* Don't shut down the power for emulation and FPGA */
2136 if (CHIP_REV_IS_SLOW(bp))
2137 return 0;
2138
2139 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2140 pmcsr |= 3;
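		/* PowerState field: value 3 (within PCI_PM_CTRL_STATE_MASK)
		 * selects D3hot.
		 */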
2141
2142 if (bp->wol)
2143 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2144
2145 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2146 pmcsr);
2147
2148 /* No more memory access after this point until
2149 * device is brought back to D0.
2150 */
2151 break;
2152
2153 default:
2154 return -EINVAL;
2155 }
2156 return 0;
2157}
2158
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002159/*
2160 * net_device service functions
2161 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002162int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002163{
2164 int work_done = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002165 u8 cos;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002166 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2167 napi);
2168 struct bnx2x *bp = fp->bp;
2169
2170 while (1) {
2171#ifdef BNX2X_STOP_ON_ERROR
2172 if (unlikely(bp->panic)) {
2173 napi_complete(napi);
2174 return 0;
2175 }
2176#endif
2177
Ariel Elior6383c0b2011-07-14 08:31:57 +00002178 for_each_cos_in_tx_queue(fp, cos)
2179 if (bnx2x_tx_queue_has_work(&fp->txdata[cos]))
2180 bnx2x_tx_int(bp, &fp->txdata[cos]);
2181
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002182
2183 if (bnx2x_has_rx_work(fp)) {
2184 work_done += bnx2x_rx_int(fp, budget - work_done);
2185
2186 /* must not complete if we consumed full budget */
2187 if (work_done >= budget)
2188 break;
2189 }
2190
2191 /* Fall out from the NAPI loop if needed */
2192 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002193#ifdef BCM_CNIC
2194 /* No need to update SB for FCoE L2 ring as long as
2195 * it's connected to the default SB and the SB
2196 * has been updated when NAPI was scheduled.
2197 */
2198 if (IS_FCOE_FP(fp)) {
2199 napi_complete(napi);
2200 break;
2201 }
2202#endif
2203
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002204 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002205 /* bnx2x_has_rx_work() reads the status block,
2206 * thus we need to ensure that status block indices
2207 * have been actually read (bnx2x_update_fpsb_idx)
2208 * prior to this check (bnx2x_has_rx_work) so that
2209 * we won't write the "newer" value of the status block
2210 * to IGU (if there was a DMA right after
2211 * bnx2x_has_rx_work and if there is no rmb, the memory
2212 * reading (bnx2x_update_fpsb_idx) may be postponed
2213 * to right before bnx2x_ack_sb). In this case there
2214 * will never be another interrupt until there is
2215 * another update of the status block, while there
2216 * is still unhandled work.
2217 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002218 rmb();
2219
2220 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2221 napi_complete(napi);
2222 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002223 DP(NETIF_MSG_HW,
2224 "Update index to %d\n", fp->fp_hc_idx);
2225 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2226 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002227 IGU_INT_ENABLE, 1);
2228 break;
2229 }
2230 }
2231 }
2232
2233 return work_done;
2234}
2235
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002236/* We split the first BD into a headers BD and a data BD
 2237 * to ease the pain of our fellow microcode engineers;
 2238 * we use one mapping for both BDs.
 2239 * So far this has only been observed to happen
 2240 * in Other Operating Systems(TM).
2241 */
2242static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
Ariel Elior6383c0b2011-07-14 08:31:57 +00002243 struct bnx2x_fp_txdata *txdata,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002244 struct sw_tx_bd *tx_buf,
2245 struct eth_tx_start_bd **tx_bd, u16 hlen,
2246 u16 bd_prod, int nbd)
2247{
2248 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2249 struct eth_tx_bd *d_tx_bd;
2250 dma_addr_t mapping;
2251 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2252
2253 /* first fix first BD */
2254 h_tx_bd->nbd = cpu_to_le16(nbd);
2255 h_tx_bd->nbytes = cpu_to_le16(hlen);
2256
2257 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2258 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2259 h_tx_bd->addr_lo, h_tx_bd->nbd);
2260
2261 /* now get a new data BD
2262 * (after the pbd) and fill it */
2263 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002264 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002265
2266 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2267 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2268
2269 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2270 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2271 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2272
2273 /* this marks the BD as one that has no individual mapping */
2274 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2275
2276 DP(NETIF_MSG_TX_QUEUED,
2277 "TSO split data size is %d (%x:%x)\n",
2278 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2279
2280 /* update tx_bd */
2281 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2282
2283 return bd_prod;
2284}
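/*
 * Illustration (hypothetical sizes): for a TSO skb whose linear part holds
 * 200 bytes while the headers take hlen == 66 bytes, the split leaves the
 * start BD describing bytes 0..65 and adds a data BD for bytes 66..199 at
 * mapping + 66; the caller passes ++nbd so the extra BD is reflected in
 * first_bd->nbd.
 */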
2285
2286static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2287{
2288 if (fix > 0)
2289 csum = (u16) ~csum_fold(csum_sub(csum,
2290 csum_partial(t_header - fix, fix, 0)));
2291
2292 else if (fix < 0)
2293 csum = (u16) ~csum_fold(csum_add(csum,
2294 csum_partial(t_header, -fix, 0)));
2295
2296 return swab16(csum);
2297}
2298
2299static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2300{
2301 u32 rc;
2302
2303 if (skb->ip_summed != CHECKSUM_PARTIAL)
2304 rc = XMIT_PLAIN;
2305
2306 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002307 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002308 rc = XMIT_CSUM_V6;
2309 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2310 rc |= XMIT_CSUM_TCP;
2311
2312 } else {
2313 rc = XMIT_CSUM_V4;
2314 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2315 rc |= XMIT_CSUM_TCP;
2316 }
2317 }
2318
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002319 if (skb_is_gso_v6(skb))
2320 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2321 else if (skb_is_gso(skb))
2322 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002323
2324 return rc;
2325}
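/*
 * Example of the returned flag combinations (derived from the logic above):
 * a CHECKSUM_PARTIAL TCP/IPv4 TSO skb yields
 *	XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4,
 * while a non-offloaded, non-GSO skb simply yields XMIT_PLAIN.
 */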
2326
2327#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 2328/* check if the packet requires linearization (packet is too fragmented);
 2329   no need to check fragmentation if page size > 8K (there will be no
 2330   violation of FW restrictions) */
2331static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2332 u32 xmit_type)
2333{
2334 int to_copy = 0;
2335 int hlen = 0;
2336 int first_bd_sz = 0;
2337
2338 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2339 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2340
2341 if (xmit_type & XMIT_GSO) {
2342 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2343 /* Check if LSO packet needs to be copied:
2344 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2345 int wnd_size = MAX_FETCH_BD - 3;
2346 /* Number of windows to check */
2347 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2348 int wnd_idx = 0;
2349 int frag_idx = 0;
2350 u32 wnd_sum = 0;
2351
2352 /* Headers length */
2353 hlen = (int)(skb_transport_header(skb) - skb->data) +
2354 tcp_hdrlen(skb);
2355
 2356			/* Amount of data (w/o headers) on the linear part of the SKB */
2357 first_bd_sz = skb_headlen(skb) - hlen;
2358
2359 wnd_sum = first_bd_sz;
2360
2361 /* Calculate the first sum - it's special */
2362 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2363 wnd_sum +=
2364 skb_shinfo(skb)->frags[frag_idx].size;
2365
 2366			/* If there was data in the linear part of the skb - check it */
2367 if (first_bd_sz > 0) {
2368 if (unlikely(wnd_sum < lso_mss)) {
2369 to_copy = 1;
2370 goto exit_lbl;
2371 }
2372
2373 wnd_sum -= first_bd_sz;
2374 }
2375
2376 /* Others are easier: run through the frag list and
2377 check all windows */
2378 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2379 wnd_sum +=
2380 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2381
2382 if (unlikely(wnd_sum < lso_mss)) {
2383 to_copy = 1;
2384 break;
2385 }
2386 wnd_sum -=
2387 skb_shinfo(skb)->frags[wnd_idx].size;
2388 }
2389 } else {
 2390			/* in the non-LSO case a too-fragmented packet should
 2391			   always be linearized */
2392 to_copy = 1;
2393 }
2394 }
2395
2396exit_lbl:
2397 if (unlikely(to_copy))
2398 DP(NETIF_MSG_TX_QUEUED,
2399 "Linearization IS REQUIRED for %s packet. "
2400 "num_frags %d hlen %d first_bd_sz %d\n",
2401 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2402 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2403
2404 return to_copy;
2405}
2406#endif
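/*
 * Illustration of the window check above (the MAX_FETCH_BD value is assumed
 * here, not quoted from this file): if the FW can fetch at most 13 BDs per
 * packet, wnd_size is 10 and linearization is requested whenever a window of
 * 10 BDs - the linear remainder plus the first 9 frags, or any 10 consecutive
 * frags after that - carries less than one gso_size worth of payload.
 */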
2407
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002408static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2409 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002410{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002411 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2412 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2413 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002414 if ((xmit_type & XMIT_GSO_V6) &&
2415 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002416 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002417}
2418
2419/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002420 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002421 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002422 * @skb: packet skb
2423 * @pbd: parse BD
2424 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002425 */
2426static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2427 struct eth_tx_parse_bd_e1x *pbd,
2428 u32 xmit_type)
2429{
2430 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2431 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2432 pbd->tcp_flags = pbd_tcp_flags(skb);
2433
2434 if (xmit_type & XMIT_GSO_V4) {
2435 pbd->ip_id = swab16(ip_hdr(skb)->id);
2436 pbd->tcp_pseudo_csum =
2437 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2438 ip_hdr(skb)->daddr,
2439 0, IPPROTO_TCP, 0));
2440
2441 } else
2442 pbd->tcp_pseudo_csum =
2443 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2444 &ipv6_hdr(skb)->daddr,
2445 0, IPPROTO_TCP, 0));
2446
2447 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2448}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002449
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002450/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002451 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002452 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002453 * @bp: driver handle
2454 * @skb: packet skb
2455 * @parsing_data: data to be updated
2456 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002457 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002458 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002459 */
2460static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002461 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002462{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002463 *parsing_data |=
2464 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2465 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2466 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002467
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002468 if (xmit_type & XMIT_CSUM_TCP) {
2469 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2470 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2471 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002472
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002473 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2474 } else
2475 /* We support checksum offload for TCP and UDP only.
2476 * No need to pass the UDP header length - it's a constant.
2477 */
2478 return skb_transport_header(skb) +
2479 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002480}
2481
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002482static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2483 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2484{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002485 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2486
2487 if (xmit_type & XMIT_CSUM_V4)
2488 tx_start_bd->bd_flags.as_bitfield |=
2489 ETH_TX_BD_FLAGS_IP_CSUM;
2490 else
2491 tx_start_bd->bd_flags.as_bitfield |=
2492 ETH_TX_BD_FLAGS_IPV6;
2493
2494 if (!(xmit_type & XMIT_CSUM_TCP))
2495 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002496}
2497
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002498/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002499 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002500 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002501 * @bp: driver handle
2502 * @skb: packet skb
2503 * @pbd: parse BD to be updated
2504 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002505 */
2506static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2507 struct eth_tx_parse_bd_e1x *pbd,
2508 u32 xmit_type)
2509{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002510 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002511
2512 /* for now NS flag is not used in Linux */
2513 pbd->global_data =
2514 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2515 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2516
2517 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002518 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002519
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002520 hlen += pbd->ip_hlen_w;
2521
2522 /* We support checksum offload for TCP and UDP only */
2523 if (xmit_type & XMIT_CSUM_TCP)
2524 hlen += tcp_hdrlen(skb) / 2;
2525 else
2526 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002527
2528 pbd->total_hlen_w = cpu_to_le16(hlen);
2529 hlen = hlen*2;
2530
2531 if (xmit_type & XMIT_CSUM_TCP) {
2532 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2533
2534 } else {
2535 s8 fix = SKB_CS_OFF(skb); /* signed! */
2536
2537 DP(NETIF_MSG_TX_QUEUED,
2538 "hlen %d fix %d csum before fix %x\n",
2539 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2540
2541 /* HW bug: fixup the CSUM */
2542 pbd->tcp_pseudo_csum =
2543 bnx2x_csum_fix(skb_transport_header(skb),
2544 SKB_CS(skb), fix);
2545
2546 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2547 pbd->tcp_pseudo_csum);
2548 }
2549
2550 return hlen;
2551}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002552
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002553/* called with netif_tx_lock
2554 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2555 * netif_wake_queue()
2556 */
2557netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2558{
2559 struct bnx2x *bp = netdev_priv(dev);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002560
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002561 struct bnx2x_fastpath *fp;
2562 struct netdev_queue *txq;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002563 struct bnx2x_fp_txdata *txdata;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002564 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002565 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002566 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002567 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002568 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002569 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002570 u16 pkt_prod, bd_prod;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002571 int nbd, txq_index, fp_index, txdata_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002572 dma_addr_t mapping;
2573 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2574 int i;
2575 u8 hlen = 0;
2576 __le16 pkt_size = 0;
2577 struct ethhdr *eth;
2578 u8 mac_type = UNICAST_ADDRESS;
2579
2580#ifdef BNX2X_STOP_ON_ERROR
2581 if (unlikely(bp->panic))
2582 return NETDEV_TX_BUSY;
2583#endif
2584
Ariel Elior6383c0b2011-07-14 08:31:57 +00002585 txq_index = skb_get_queue_mapping(skb);
2586 txq = netdev_get_tx_queue(dev, txq_index);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002587
Ariel Elior6383c0b2011-07-14 08:31:57 +00002588 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT);
2589
2590 /* decode the fastpath index and the cos index from the txq */
2591 fp_index = TXQ_TO_FP(txq_index);
2592 txdata_index = TXQ_TO_COS(txq_index);
2593
2594#ifdef BCM_CNIC
2595 /*
2596 * Override the above for the FCoE queue:
2597 * - FCoE fp entry is right after the ETH entries.
2598 * - FCoE L2 queue uses bp->txdata[0] only.
2599 */
2600 if (unlikely(!NO_FCOE(bp) && (txq_index ==
2601 bnx2x_fcoe_tx(bp, txq_index)))) {
2602 fp_index = FCOE_IDX;
2603 txdata_index = 0;
2604 }
2605#endif
2606
2607 /* enable this debug print to view the transmission queue being used
2608 DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d",
2609 txq_index, fp_index, txdata_index); */
2610
2611 /* locate the fastpath and the txdata */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002612 fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00002613 txdata = &fp->txdata[txdata_index];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002614
Ariel Elior6383c0b2011-07-14 08:31:57 +00002615	/* enable this debug print to view the transmission details
2616 DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
2617 " tx_data ptr %p fp pointer %p",
2618 txdata->cid, fp_index, txdata_index, txdata, fp); */
2619
2620 if (unlikely(bnx2x_tx_avail(bp, txdata) <
2621 (skb_shinfo(skb)->nr_frags + 3))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002622 fp->eth_q_stats.driver_xoff++;
2623 netif_tx_stop_queue(txq);
2624 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2625 return NETDEV_TX_BUSY;
2626 }
2627
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002628 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2629 "protocol(%x,%x) gso type %x xmit_type %x\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002630 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002631 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2632
2633 eth = (struct ethhdr *)skb->data;
2634
2635 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2636 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2637 if (is_broadcast_ether_addr(eth->h_dest))
2638 mac_type = BROADCAST_ADDRESS;
2639 else
2640 mac_type = MULTICAST_ADDRESS;
2641 }
2642
2643#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2644 /* First, check if we need to linearize the skb (due to FW
2645 restrictions). No need to check fragmentation if page size > 8K
 2646	   (there will be no violation of FW restrictions) */
2647 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2648 /* Statistics of linearization */
2649 bp->lin_cnt++;
2650 if (skb_linearize(skb) != 0) {
2651 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2652 "silently dropping this SKB\n");
2653 dev_kfree_skb_any(skb);
2654 return NETDEV_TX_OK;
2655 }
2656 }
2657#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002658 /* Map skb linear data for DMA */
2659 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2660 skb_headlen(skb), DMA_TO_DEVICE);
2661 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2662 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2663 "silently dropping this SKB\n");
2664 dev_kfree_skb_any(skb);
2665 return NETDEV_TX_OK;
2666 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002667 /*
2668 Please read carefully. First we use one BD which we mark as start,
2669 then we have a parsing info BD (used for TSO or xsum),
2670 and only then we have the rest of the TSO BDs.
2671 (don't forget to mark the last one as last,
2672 and to unmap only AFTER you write to the BD ...)
 2673	   And above all, all PBD sizes are in words - NOT DWORDS!
2674 */
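	/*
	 * Illustrative BD chain for a TSO skb with two frags (a sketch, not
	 * taken from this file): start BD (headers, START flag) -> parse BD
	 * (e1x or e2) -> split-off data BD for the rest of the linear data ->
	 * one data BD per frag; nbd counts all of them and is written back
	 * into first_bd->nbd before the doorbell.
	 */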
2675
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002676 /* get current pkt produced now - advance it just before sending packet
2677 * since mapping of pages may fail and cause packet to be dropped
2678 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002679 pkt_prod = txdata->tx_pkt_prod;
2680 bd_prod = TX_BD(txdata->tx_bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002681
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002682 /* get a tx_buf and first BD
2683 * tx_start_bd may be changed during SPLIT,
2684 * but first_bd will always stay first
2685 */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002686 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
2687 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002688 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002689
2690 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002691 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2692 mac_type);
2693
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002694 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002695 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002696
2697 /* remember the first BD of the packet */
Ariel Elior6383c0b2011-07-14 08:31:57 +00002698 tx_buf->first_bd = txdata->tx_bd_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002699 tx_buf->skb = skb;
2700 tx_buf->flags = 0;
2701
2702 DP(NETIF_MSG_TX_QUEUED,
2703 "sending pkt %u @%p next_idx %u bd %u @%p\n",
Ariel Elior6383c0b2011-07-14 08:31:57 +00002704 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002705
Jesse Grosseab6d182010-10-20 13:56:03 +00002706 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002707 tx_start_bd->vlan_or_ethertype =
2708 cpu_to_le16(vlan_tx_tag_get(skb));
2709 tx_start_bd->bd_flags.as_bitfield |=
2710 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002711 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002712 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002713
2714 /* turn on parsing and get a BD */
2715 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002716
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002717 if (xmit_type & XMIT_CSUM)
2718 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002719
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002720 if (!CHIP_IS_E1x(bp)) {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002721 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002722 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2723 /* Set PBD in checksum offload case */
2724 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002725 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2726 &pbd_e2_parsing_data,
2727 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002728 if (IS_MF_SI(bp)) {
2729 /*
2730 * fill in the MAC addresses in the PBD - for local
2731 * switching
2732 */
2733 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2734 &pbd_e2->src_mac_addr_mid,
2735 &pbd_e2->src_mac_addr_lo,
2736 eth->h_source);
2737 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2738 &pbd_e2->dst_mac_addr_mid,
2739 &pbd_e2->dst_mac_addr_lo,
2740 eth->h_dest);
2741 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002742 } else {
Ariel Elior6383c0b2011-07-14 08:31:57 +00002743 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002744 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2745 /* Set PBD in checksum offload case */
2746 if (xmit_type & XMIT_CSUM)
2747 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002748
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002749 }
2750
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002751 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002752 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2753 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002754 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002755 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2756 pkt_size = tx_start_bd->nbytes;
2757
2758 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2759 " nbytes %d flags %x vlan %x\n",
2760 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2761 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002762 tx_start_bd->bd_flags.as_bitfield,
2763 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002764
2765 if (xmit_type & XMIT_GSO) {
2766
2767 DP(NETIF_MSG_TX_QUEUED,
2768 "TSO packet len %d hlen %d total len %d tso size %d\n",
2769 skb->len, hlen, skb_headlen(skb),
2770 skb_shinfo(skb)->gso_size);
2771
2772 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2773
2774 if (unlikely(skb_headlen(skb) > hlen))
Ariel Elior6383c0b2011-07-14 08:31:57 +00002775 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
2776 &tx_start_bd, hlen,
2777 bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002778 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002779 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2780 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002781 else
2782 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002783 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002784
2785 /* Set the PBD's parsing_data field if not zero
2786 * (for the chips newer than 57711).
2787 */
2788 if (pbd_e2_parsing_data)
2789 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2790
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002791 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2792
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002793 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002794 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2795 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2796
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002797 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2798 frag->page_offset, frag->size,
2799 DMA_TO_DEVICE);
2800 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2801
2802 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2803 "dropping packet...\n");
2804
2805 /* we need unmap all buffers already mapped
2806 * for this SKB;
2807 * first_bd->nbd need to be properly updated
2808 * before call to bnx2x_free_tx_pkt
2809 */
2810 first_bd->nbd = cpu_to_le16(nbd);
Ariel Elior6383c0b2011-07-14 08:31:57 +00002811 bnx2x_free_tx_pkt(bp, txdata,
2812 TX_BD(txdata->tx_pkt_prod));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002813 return NETDEV_TX_OK;
2814 }
2815
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002816 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Ariel Elior6383c0b2011-07-14 08:31:57 +00002817 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002818 if (total_pkt_bd == NULL)
Ariel Elior6383c0b2011-07-14 08:31:57 +00002819 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002820
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002821 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2822 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2823 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2824 le16_add_cpu(&pkt_size, frag->size);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002825 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002826
2827 DP(NETIF_MSG_TX_QUEUED,
2828 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2829 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2830 le16_to_cpu(tx_data_bd->nbytes));
2831 }
2832
2833 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2834
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002835 /* update with actual num BDs */
2836 first_bd->nbd = cpu_to_le16(nbd);
2837
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002838 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2839
2840 /* now send a tx doorbell, counting the next BD
2841 * if the packet contains or ends with it
2842 */
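	/* In other words: each BD page ends with a "next page" link BD; if the
	 * producer offset within the page is now smaller than nbd, the packet's
	 * BD chain wrapped across a page boundary and that link BD sits inside
	 * (or right after) the chain, so the doorbell count must cover it too.
	 */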
2843 if (TX_BD_POFF(bd_prod) < nbd)
2844 nbd++;
2845
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002846	/* total_pkt_bytes should be set on the first data BD if
 2847	 * it's not an LSO packet and there is more than one
 2848	 * data BD. In this case pkt_size is limited by the MTU value.
 2849	 * However, we prefer to set it for an LSO packet as well (while we
 2850	 * don't have to) in order to save some CPU cycles in the non-LSO
 2851	 * case, where we care much more about them.
 2852	 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002853 if (total_pkt_bd != NULL)
2854 total_pkt_bd->total_pkt_bytes = pkt_size;
2855
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002856 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002857 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002858 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002859 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002860 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2861 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2862 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2863 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002864 if (pbd_e2)
2865 DP(NETIF_MSG_TX_QUEUED,
2866 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2867 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2868 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2869 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2870 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002871 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2872
Ariel Elior6383c0b2011-07-14 08:31:57 +00002873 txdata->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002874 /*
2875 * Make sure that the BD data is updated before updating the producer
2876 * since FW might read the BD right after the producer is updated.
2877 * This is only applicable for weak-ordered memory model archs such
 2879	 * as IA-64. The following barrier is also mandatory since the FW
 2880	 * assumes packets must have BDs.
2880 */
2881 wmb();
2882
Ariel Elior6383c0b2011-07-14 08:31:57 +00002883 txdata->tx_db.data.prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002884 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002885
Ariel Elior6383c0b2011-07-14 08:31:57 +00002886 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002887
2888 mmiowb();
2889
Ariel Elior6383c0b2011-07-14 08:31:57 +00002890 txdata->tx_bd_prod += nbd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002891
Ariel Elior6383c0b2011-07-14 08:31:57 +00002892 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_SKB_FRAGS + 3)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002893 netif_tx_stop_queue(txq);
2894
 2895		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
 2896		 * the ordering of set_bit() in netif_tx_stop_queue() and the
 2897		 * read of txdata->tx_bd_cons */
2898 smp_mb();
2899
2900 fp->eth_q_stats.driver_xoff++;
Ariel Elior6383c0b2011-07-14 08:31:57 +00002901 if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 3)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002902 netif_tx_wake_queue(txq);
2903 }
Ariel Elior6383c0b2011-07-14 08:31:57 +00002904 txdata->tx_pkt++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002905
2906 return NETDEV_TX_OK;
2907}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002908
Ariel Elior6383c0b2011-07-14 08:31:57 +00002909/**
 2910 * bnx2x_setup_tc - routine to configure a net_device for multiple traffic classes
 2911 *
 2912 * @netdev: net device to configure
 2913 * @num_tc: number of traffic classes to enable
2914 *
2915 * callback connected to the ndo_setup_tc function pointer
2916 */
2917int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
2918{
2919 int cos, prio, count, offset;
2920 struct bnx2x *bp = netdev_priv(dev);
2921
2922 /* setup tc must be called under rtnl lock */
2923 ASSERT_RTNL();
2924
 2925	/* no traffic classes requested: reset the tc configuration and return */
2926 if (!num_tc) {
2927 netdev_reset_tc(dev);
2928 return 0;
2929 }
2930
2931 /* requested to support too many traffic classes */
2932 if (num_tc > bp->max_cos) {
2933 DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
2934 " requested: %d. max supported is %d",
2935 num_tc, bp->max_cos);
2936 return -EINVAL;
2937 }
2938
2939 /* declare amount of supported traffic classes */
2940 if (netdev_set_num_tc(dev, num_tc)) {
2941 DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes",
2942 num_tc);
2943 return -EINVAL;
2944 }
2945
2946 /* configure priority to traffic class mapping */
2947 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
2948 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
2949 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d",
2950 prio, bp->prio_to_cos[prio]);
2951 }
2952
2953
 2954	/* Use this configuration to differentiate tc0 from the other COSes.
 2955	   This can be used for ETS or PFC, and saves the effort of setting
 2956	   up a multi-class queueing discipline or negotiating DCBX with a switch:
2957 netdev_set_prio_tc_map(dev, 0, 0);
2958 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", 0, 0);
2959 for (prio = 1; prio < 16; prio++) {
2960 netdev_set_prio_tc_map(dev, prio, 1);
2961 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d", prio, 1);
2962 } */
2963
2964 /* configure traffic class to transmission queue mapping */
2965 for (cos = 0; cos < bp->max_cos; cos++) {
2966 count = BNX2X_NUM_ETH_QUEUES(bp);
2967 offset = cos * MAX_TXQS_PER_COS;
2968 netdev_set_tc_queue(dev, cos, count, offset);
2969 DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d",
2970 cos, offset, count);
2971 }
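	/* For example, with 4 ETH RSS queues and bp->max_cos == 3 the loop
	 * above yields:
	 *	tc0 -> txq 0..3                                  (offset 0)
	 *	tc1 -> txq MAX_TXQS_PER_COS..MAX_TXQS_PER_COS + 3
	 *	tc2 -> txq 2 * MAX_TXQS_PER_COS..2 * MAX_TXQS_PER_COS + 3
	 */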
2972
2973 return 0;
2974}
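/* Illustrative sketch only (not part of this file): a driver exposes the
 * routine above to the stack through its net_device_ops, for example:
 *
 *	static const struct net_device_ops bnx2x_netdev_ops = {
 *		.ndo_setup_tc	= bnx2x_setup_tc,
 *	};
 *
 * The priority/tc/txq mapping can then be driven by DCBX or from user
 * space (for instance via the mqprio qdisc).
 */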
2975
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002976/* called with rtnl_lock */
2977int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2978{
2979 struct sockaddr *addr = p;
2980 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002981 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002982
2983 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2984 return -EINVAL;
2985
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002986 if (netif_running(dev)) {
2987 rc = bnx2x_set_eth_mac(bp, false);
2988 if (rc)
2989 return rc;
2990 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002991
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002992 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2993
2994 if (netif_running(dev))
2995 rc = bnx2x_set_eth_mac(bp, true);
2996
2997 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002998}
2999
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003000static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
3001{
3002 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
3003 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
Ariel Elior6383c0b2011-07-14 08:31:57 +00003004 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003005
3006 /* Common */
3007#ifdef BCM_CNIC
3008 if (IS_FCOE_IDX(fp_index)) {
3009 memset(sb, 0, sizeof(union host_hc_status_block));
3010 fp->status_blk_mapping = 0;
3011
3012 } else {
3013#endif
3014 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003015 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003016 BNX2X_PCI_FREE(sb->e2_sb,
3017 bnx2x_fp(bp, fp_index,
3018 status_blk_mapping),
3019 sizeof(struct host_hc_status_block_e2));
3020 else
3021 BNX2X_PCI_FREE(sb->e1x_sb,
3022 bnx2x_fp(bp, fp_index,
3023 status_blk_mapping),
3024 sizeof(struct host_hc_status_block_e1x));
3025#ifdef BCM_CNIC
3026 }
3027#endif
3028 /* Rx */
3029 if (!skip_rx_queue(bp, fp_index)) {
3030 bnx2x_free_rx_bds(fp);
3031
3032 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3033 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
3034 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
3035 bnx2x_fp(bp, fp_index, rx_desc_mapping),
3036 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3037
3038 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
3039 bnx2x_fp(bp, fp_index, rx_comp_mapping),
3040 sizeof(struct eth_fast_path_rx_cqe) *
3041 NUM_RCQ_BD);
3042
3043 /* SGE ring */
3044 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
3045 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
3046 bnx2x_fp(bp, fp_index, rx_sge_mapping),
3047 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3048 }
3049
3050 /* Tx */
3051 if (!skip_tx_queue(bp, fp_index)) {
3052 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003053 for_each_cos_in_tx_queue(fp, cos) {
3054 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3055
3056 DP(BNX2X_MSG_SP,
3057 "freeing tx memory of fp %d cos %d cid %d",
3058 fp_index, cos, txdata->cid);
3059
3060 BNX2X_FREE(txdata->tx_buf_ring);
3061 BNX2X_PCI_FREE(txdata->tx_desc_ring,
3062 txdata->tx_desc_mapping,
3063 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
3064 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003065 }
3066 /* end of fastpath */
3067}
3068
3069void bnx2x_free_fp_mem(struct bnx2x *bp)
3070{
3071 int i;
3072 for_each_queue(bp, i)
3073 bnx2x_free_fp_mem_at(bp, i);
3074}
3075
3076static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
3077{
3078 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003079 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003080 bnx2x_fp(bp, index, sb_index_values) =
3081 (__le16 *)status_blk.e2_sb->sb.index_values;
3082 bnx2x_fp(bp, index, sb_running_index) =
3083 (__le16 *)status_blk.e2_sb->sb.running_index;
3084 } else {
3085 bnx2x_fp(bp, index, sb_index_values) =
3086 (__le16 *)status_blk.e1x_sb->sb.index_values;
3087 bnx2x_fp(bp, index, sb_running_index) =
3088 (__le16 *)status_blk.e1x_sb->sb.running_index;
3089 }
3090}
3091
3092static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
3093{
3094 union host_hc_status_block *sb;
3095 struct bnx2x_fastpath *fp = &bp->fp[index];
3096 int ring_size = 0;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003097 u8 cos;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003098
3099 /* if rx_ring_size specified - use it */
3100 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
Ariel Elior6383c0b2011-07-14 08:31:57 +00003101 MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003102
3103 /* allocate at least number of buffers required by FW */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003104 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003105 MIN_RX_SIZE_TPA,
3106 rx_ring_size);
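	/* E.g. with bp->rx_ring_size == 0 and 4 RX queues, each queue gets
	 * MAX_RX_AVAIL / 4 buffers, but never fewer than the FW minimum
	 * (MIN_RX_SIZE_TPA or MIN_RX_SIZE_NONTPA, depending on disable_tpa).
	 */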
3107
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003108 /* Common */
3109 sb = &bnx2x_fp(bp, index, status_blk);
3110#ifdef BCM_CNIC
3111 if (!IS_FCOE_IDX(index)) {
3112#endif
3113 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003114 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003115 BNX2X_PCI_ALLOC(sb->e2_sb,
3116 &bnx2x_fp(bp, index, status_blk_mapping),
3117 sizeof(struct host_hc_status_block_e2));
3118 else
3119 BNX2X_PCI_ALLOC(sb->e1x_sb,
3120 &bnx2x_fp(bp, index, status_blk_mapping),
3121 sizeof(struct host_hc_status_block_e1x));
3122#ifdef BCM_CNIC
3123 }
3124#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003125
3126 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
3127 * set shortcuts for it.
3128 */
3129 if (!IS_FCOE_IDX(index))
3130 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003131
3132 /* Tx */
3133 if (!skip_tx_queue(bp, index)) {
3134 /* fastpath tx rings: tx_buf tx_desc */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003135 for_each_cos_in_tx_queue(fp, cos) {
3136 struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
3137
3138 DP(BNX2X_MSG_SP, "allocating tx memory of "
3139 "fp %d cos %d",
3140 index, cos);
3141
3142 BNX2X_ALLOC(txdata->tx_buf_ring,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003143 sizeof(struct sw_tx_bd) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003144 BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
3145 &txdata->tx_desc_mapping,
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003146 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Ariel Elior6383c0b2011-07-14 08:31:57 +00003147 }
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003148 }
3149
3150 /* Rx */
3151 if (!skip_rx_queue(bp, index)) {
3152 /* fastpath rx rings: rx_buf rx_desc rx_comp */
3153 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
3154 sizeof(struct sw_rx_bd) * NUM_RX_BD);
3155 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
3156 &bnx2x_fp(bp, index, rx_desc_mapping),
3157 sizeof(struct eth_rx_bd) * NUM_RX_BD);
3158
3159 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
3160 &bnx2x_fp(bp, index, rx_comp_mapping),
3161 sizeof(struct eth_fast_path_rx_cqe) *
3162 NUM_RCQ_BD);
3163
3164 /* SGE ring */
3165 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
3166 sizeof(struct sw_rx_page) * NUM_RX_SGE);
3167 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
3168 &bnx2x_fp(bp, index, rx_sge_mapping),
3169 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
3170 /* RX BD ring */
3171 bnx2x_set_next_page_rx_bd(fp);
3172
3173 /* CQ ring */
3174 bnx2x_set_next_page_rx_cq(fp);
3175
3176 /* BDs */
3177 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
3178 if (ring_size < rx_ring_size)
3179 goto alloc_mem_err;
3180 }
3181
3182 return 0;
3183
3184/* handles low memory cases */
3185alloc_mem_err:
3186 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
3187 index, ring_size);
 3188	/* FW will drop all packets if the queue is not big enough;
 3189	 * in these cases we disable the queue.
Ariel Elior6383c0b2011-07-14 08:31:57 +00003190	 * The minimum size is different for OOO, TPA and non-TPA queues
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003191 */
3192 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00003193 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003194 /* release memory allocated for this queue */
3195 bnx2x_free_fp_mem_at(bp, index);
3196 return -ENOMEM;
3197 }
3198 return 0;
3199}
3200
3201int bnx2x_alloc_fp_mem(struct bnx2x *bp)
3202{
3203 int i;
3204
3205 /**
3206 * 1. Allocate FP for leading - fatal if error
3207 * 2. {CNIC} Allocate FCoE FP - fatal if error
Ariel Elior6383c0b2011-07-14 08:31:57 +00003208 * 3. {CNIC} Allocate OOO + FWD - disable OOO if error
3209 * 4. Allocate RSS - fix number of queues if error
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003210 */
3211
3212 /* leading */
3213 if (bnx2x_alloc_fp_mem_at(bp, 0))
3214 return -ENOMEM;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003215
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003216#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00003217 if (!NO_FCOE(bp))
3218 /* FCoE */
3219 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
 3220			/* we will fail the load process instead of
 3221			 * marking NO_FCOE_FLAG
 3222			 */
3223 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003224#endif
Ariel Elior6383c0b2011-07-14 08:31:57 +00003225
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003226 /* RSS */
3227 for_each_nondefault_eth_queue(bp, i)
3228 if (bnx2x_alloc_fp_mem_at(bp, i))
3229 break;
3230
3231 /* handle memory failures */
3232 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
3233 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
3234
3235 WARN_ON(delta < 0);
3236#ifdef BCM_CNIC
3237 /**
 3238		 * move non-eth FPs next to the last eth FP;
 3239		 * this must be done in the following order:
 3240		 * FCOE_IDX < FWD_IDX < OOO_IDX
3241 */
3242
Ariel Elior6383c0b2011-07-14 08:31:57 +00003243		/* move the FCoE fp even if NO_FCOE_FLAG is on */
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00003244 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3245#endif
3246 bp->num_queues -= delta;
3247 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3248 bp->num_queues + delta, bp->num_queues);
3249 }
3250
3251 return 0;
3252}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003253
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003254void bnx2x_free_mem_bp(struct bnx2x *bp)
3255{
3256 kfree(bp->fp);
3257 kfree(bp->msix_table);
3258 kfree(bp->ilt);
3259}
3260
3261int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3262{
3263 struct bnx2x_fastpath *fp;
3264 struct msix_entry *tbl;
3265 struct bnx2x_ilt *ilt;
Ariel Elior6383c0b2011-07-14 08:31:57 +00003266 int msix_table_size = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003267
Ariel Elior6383c0b2011-07-14 08:31:57 +00003268 /*
 3269	 * The biggest MSI-X table we might need is the maximum number of fast
 3270	 * path IGU SBs plus the default SB (for the PF).
3271 */
3272 msix_table_size = bp->igu_sb_cnt + 1;
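	/* E.g. a PF with 16 fast path IGU SBs sizes the table for
	 * 16 + 1 = 17 msix_entry slots; the extra slot covers the default
	 * (slowpath) SB.
	 */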
3273
3274 /* fp array: RSS plus CNIC related L2 queues */
3275 fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
3276 sizeof(*fp), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003277 if (!fp)
3278 goto alloc_err;
3279 bp->fp = fp;
3280
3281 /* msix table */
Ariel Elior6383c0b2011-07-14 08:31:57 +00003282 tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003283 if (!tbl)
3284 goto alloc_err;
3285 bp->msix_table = tbl;
3286
3287 /* ilt */
3288 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3289 if (!ilt)
3290 goto alloc_err;
3291 bp->ilt = ilt;
3292
3293 return 0;
3294alloc_err:
3295 bnx2x_free_mem_bp(bp);
3296 return -ENOMEM;
3297
3298}
3299
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003300int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003301{
3302 struct bnx2x *bp = netdev_priv(dev);
3303
3304 if (unlikely(!netif_running(dev)))
3305 return 0;
3306
3307 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3308 return bnx2x_nic_load(bp, LOAD_NORMAL);
3309}
3310
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003311int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3312{
3313 u32 sel_phy_idx = 0;
3314 if (bp->link_params.num_phys <= 1)
3315 return INT_PHY;
3316
3317 if (bp->link_vars.link_up) {
3318 sel_phy_idx = EXT_PHY1;
3319 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3320 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
3321 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3322 sel_phy_idx = EXT_PHY2;
3323 } else {
3324
3325 switch (bnx2x_phy_selection(&bp->link_params)) {
3326 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3327 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3328 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3329 sel_phy_idx = EXT_PHY1;
3330 break;
3331 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3332 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3333 sel_phy_idx = EXT_PHY2;
3334 break;
3335 }
3336 }
3337
3338 return sel_phy_idx;
3339
3340}
3341int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3342{
3343 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3344 /*
 3345	 * The selected active PHY index is always the index after swapping (in
 3346	 * case PHY swapping is enabled), so when swapping is enabled we need to
 3347	 * reverse the configuration.
3348 */
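	/* E.g. with PHY swapping enabled, a link currently running on EXT_PHY1
	 * uses the link configuration stored at the EXT_PHY2 index (and vice
	 * versa), which is why the indices are swapped below before
	 * LINK_CONFIG_IDX() is applied.
	 */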
3349
3350 if (bp->link_params.multi_phy_config &
3351 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3352 if (sel_phy_idx == EXT_PHY1)
3353 sel_phy_idx = EXT_PHY2;
3354 else if (sel_phy_idx == EXT_PHY2)
3355 sel_phy_idx = EXT_PHY1;
3356 }
3357 return LINK_CONFIG_IDX(sel_phy_idx);
3358}
3359
Vladislav Zolotarovbf61ee12011-07-21 07:56:51 +00003360#if defined(NETDEV_FCOE_WWNN) && defined(BCM_CNIC)
3361int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
3362{
3363 struct bnx2x *bp = netdev_priv(dev);
3364 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
3365
3366 switch (type) {
3367 case NETDEV_FCOE_WWNN:
3368 *wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
3369 cp->fcoe_wwn_node_name_lo);
3370 break;
3371 case NETDEV_FCOE_WWPN:
3372 *wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
3373 cp->fcoe_wwn_port_name_lo);
3374 break;
3375 default:
3376 return -EINVAL;
3377 }
3378
3379 return 0;
3380}
3381#endif
3382
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003383/* called with rtnl_lock */
3384int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3385{
3386 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003387
3388 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3389 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3390 return -EAGAIN;
3391 }
3392
3393 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3394 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3395 return -EINVAL;
3396
3397 /* This does not race with packet allocation
3398 * because the actual alloc size is
3399 * only updated as part of load
3400 */
3401 dev->mtu = new_mtu;
3402
Michał Mirosław66371c42011-04-12 09:38:23 +00003403 return bnx2x_reload_if_running(dev);
3404}
3405
3406u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3407{
3408 struct bnx2x *bp = netdev_priv(dev);
3409
3410 /* TPA requires Rx CSUM offloading */
3411 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3412 features &= ~NETIF_F_LRO;
3413
3414 return features;
3415}
3416
3417int bnx2x_set_features(struct net_device *dev, u32 features)
3418{
3419 struct bnx2x *bp = netdev_priv(dev);
3420 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003421 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003422
3423 if (features & NETIF_F_LRO)
3424 flags |= TPA_ENABLE_FLAG;
3425 else
3426 flags &= ~TPA_ENABLE_FLAG;
3427
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003428 if (features & NETIF_F_LOOPBACK) {
3429 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3430 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3431 bnx2x_reload = true;
3432 }
3433 } else {
3434 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3435 bp->link_params.loopback_mode = LOOPBACK_NONE;
3436 bnx2x_reload = true;
3437 }
3438 }
3439
Michał Mirosław66371c42011-04-12 09:38:23 +00003440 if (flags ^ bp->flags) {
3441 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003442 bnx2x_reload = true;
3443 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003444
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003445 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003446 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3447 return bnx2x_reload_if_running(dev);
3448 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003449 }
3450
Michał Mirosław66371c42011-04-12 09:38:23 +00003451 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003452}
3453
3454void bnx2x_tx_timeout(struct net_device *dev)
3455{
3456 struct bnx2x *bp = netdev_priv(dev);
3457
3458#ifdef BNX2X_STOP_ON_ERROR
3459 if (!bp->panic)
3460 bnx2x_panic();
3461#endif
Ariel Elior7be08a72011-07-14 08:31:19 +00003462
3463 smp_mb__before_clear_bit();
3464 set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
3465 smp_mb__after_clear_bit();
3466
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003467 /* This allows the netif to be shutdown gracefully before resetting */
Ariel Elior7be08a72011-07-14 08:31:19 +00003468 schedule_delayed_work(&bp->sp_rtnl_task, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003469}
3470
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003471int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3472{
3473 struct net_device *dev = pci_get_drvdata(pdev);
3474 struct bnx2x *bp;
3475
3476 if (!dev) {
3477 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3478 return -ENODEV;
3479 }
3480 bp = netdev_priv(dev);
3481
3482 rtnl_lock();
3483
3484 pci_save_state(pdev);
3485
3486 if (!netif_running(dev)) {
3487 rtnl_unlock();
3488 return 0;
3489 }
3490
3491 netif_device_detach(dev);
3492
3493 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3494
3495 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3496
3497 rtnl_unlock();
3498
3499 return 0;
3500}
3501
3502int bnx2x_resume(struct pci_dev *pdev)
3503{
3504 struct net_device *dev = pci_get_drvdata(pdev);
3505 struct bnx2x *bp;
3506 int rc;
3507
3508 if (!dev) {
3509 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3510 return -ENODEV;
3511 }
3512 bp = netdev_priv(dev);
3513
3514 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3515 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3516 return -EAGAIN;
3517 }
3518
3519 rtnl_lock();
3520
3521 pci_restore_state(pdev);
3522
3523 if (!netif_running(dev)) {
3524 rtnl_unlock();
3525 return 0;
3526 }
3527
3528 bnx2x_set_power_state(bp, PCI_D0);
3529 netif_device_attach(dev);
3530
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003531 /* Since the chip was reset, clear the FW sequence number */
3532 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003533 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3534
3535 rtnl_unlock();
3536
3537 return rc;
3538}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003539
3540
3541void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3542 u32 cid)
3543{
3544 /* ustorm cxt validation */
3545 cxt->ustorm_ag_context.cdu_usage =
3546 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3547 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
 3548	/* xstorm cxt validation */
3549 cxt->xstorm_ag_context.cdu_reserved =
3550 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3551 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3552}
3553
3554static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3555 u8 fw_sb_id, u8 sb_index,
3556 u8 ticks)
3557{
3558
3559 u32 addr = BAR_CSTRORM_INTMEM +
3560 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3561 REG_WR8(bp, addr, ticks);
3562 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3563 port, fw_sb_id, sb_index, ticks);
3564}
3565
3566static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3567 u16 fw_sb_id, u8 sb_index,
3568 u8 disable)
3569{
3570 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3571 u32 addr = BAR_CSTRORM_INTMEM +
3572 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3573 u16 flags = REG_RD16(bp, addr);
3574 /* clear and set */
3575 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3576 flags |= enable_flag;
3577 REG_WR16(bp, addr, flags);
3578 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3579 port, fw_sb_id, sb_index, disable);
3580}
3581
3582void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3583 u8 sb_index, u8 disable, u16 usec)
3584{
3585 int port = BP_PORT(bp);
3586 u8 ticks = usec / BNX2X_BTR;
3587
3588 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3589
3590 disable = disable ? 1 : (usec ? 0 : 1);
3591 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3592}
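
/* Example (assuming BNX2X_BTR is 4, i.e. one HC tick equals 4 usec): a
 * requested timeout of 100 usec programs 100 / 4 = 25 ticks. Note that the
 * index is also forced to the disabled state when usec == 0, even if the
 * caller did not set 'disable'.
 */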