Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000018#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000019#include <linux/if_vlan.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000020#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000021#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070022#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000023#include <linux/firmware.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000024#include "bnx2x_cmn.h"
25
Dmitry Kravkov523224a2010-10-06 03:23:26 +000026#include "bnx2x_init.h"
27
stephen hemminger8d962862010-10-21 07:50:56 +000028static int bnx2x_setup_irqs(struct bnx2x *bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000029
30/* free skb in the packet ring at pos idx
31 * return idx of last bd freed
32 */
33static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34 u16 idx)
35{
36 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37 struct eth_tx_start_bd *tx_start_bd;
38 struct eth_tx_bd *tx_data_bd;
39 struct sk_buff *skb = tx_buf->skb;
40 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41 int nbd;
42
43	/* prefetch skb end pointer to speed up dev_kfree_skb() */
44 prefetch(&skb->end);
45
46 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
47 idx, tx_buf, skb);
48
49 /* unmap first bd */
50 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +000053 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000054
55 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56#ifdef BNX2X_STOP_ON_ERROR
57 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58 BNX2X_ERR("BAD nbd!\n");
59 bnx2x_panic();
60 }
61#endif
62 new_cons = nbd + tx_buf->first_bd;
63
64 /* Get the next bd */
65 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67 /* Skip a parse bd... */
68 --nbd;
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71 /* ...and the TSO split header bd since they have no mapping */
72 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73 --nbd;
74 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75 }
76
77 /* now free frags */
78 while (nbd > 0) {
79
80 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84 if (--nbd)
85 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86 }
87
88 /* release skb */
89 WARN_ON(!skb);
90 dev_kfree_skb(skb);
91 tx_buf->first_bd = 0;
92 tx_buf->skb = NULL;
93
94 return new_cons;
95}
96
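/*
 * Illustrative sketch, not part of the driver: why the walk in
 * bnx2x_free_tx_pkt() uses TX_BD(NEXT_TX_IDX(...)) instead of a plain
 * increment.  The descriptor ring is built from pages and the last entry
 * of each page only links to the next page, so that slot must be skipped.
 * Simplified model with hypothetical constants (256 slots per page,
 * 8 pages); the driver's own TX_BD()/NEXT_TX_IDX() macros encapsulate the
 * real layout.
 */
#define SKETCH_TX_DESC_CNT	256U
#define SKETCH_TX_PAGES		8U
#define SKETCH_TX_RING_SZ	(SKETCH_TX_DESC_CNT * SKETCH_TX_PAGES)

static inline u16 sketch_next_tx_idx(u16 idx)
{
	idx++;
	/* the last slot of a page holds the "next page" pointer, not a BD */
	if ((idx & (SKETCH_TX_DESC_CNT - 1)) == SKETCH_TX_DESC_CNT - 1)
		idx++;
	return idx & (SKETCH_TX_RING_SZ - 1);	/* wrap over the whole ring */
}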
97int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98{
99 struct bnx2x *bp = fp->bp;
100 struct netdev_queue *txq;
101 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103#ifdef BNX2X_STOP_ON_ERROR
104 if (unlikely(bp->panic))
105 return -1;
106#endif
107
108 txq = netdev_get_tx_queue(bp->dev, fp->index);
109 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110 sw_cons = fp->tx_pkt_cons;
111
112 while (sw_cons != hw_cons) {
113 u16 pkt_cons;
114
115 pkt_cons = TX_BD(sw_cons);
116
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000117 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
118 " pkt_cons %u\n",
119 fp->index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000120
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000121 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
122 sw_cons++;
123 }
124
125 fp->tx_pkt_cons = sw_cons;
126 fp->tx_bd_cons = bd_cons;
127
128 /* Need to make the tx_bd_cons update visible to start_xmit()
129 * before checking for netif_tx_queue_stopped(). Without the
130 * memory barrier, there is a small possibility that
131 * start_xmit() will miss it and cause the queue to be stopped
132 * forever.
133 */
134 smp_mb();
135
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000136 if (unlikely(netif_tx_queue_stopped(txq))) {
137 /* Taking tx_lock() is needed to prevent reenabling the queue
138		 * while it's empty. This could happen if rx_action() gets
139 * suspended in bnx2x_tx_int() after the condition before
140 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
141 *
142 * stops the queue->sees fresh tx_bd_cons->releases the queue->
143 * sends some packets consuming the whole queue again->
144 * stops the queue
145 */
146
147 __netif_tx_lock(txq, smp_processor_id());
148
149 if ((netif_tx_queue_stopped(txq)) &&
150 (bp->state == BNX2X_STATE_OPEN) &&
151 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
152 netif_tx_wake_queue(txq);
153
154 __netif_tx_unlock(txq);
155 }
156 return 0;
157}
158
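/*
 * Illustrative sketch, not part of the driver: the smp_mb() above pairs
 * with a barrier on the transmit side.  The xmit path (producer) and the
 * completion path (consumer) each re-check the other side's condition
 * after their barrier, otherwise a wake-up can be lost and the queue would
 * stay stopped forever.  This is roughly the producer half, with a
 * hypothetical function name:
 */
static void sketch_xmit_side_stop(struct bnx2x_fastpath *fp,
				  struct netdev_queue *txq)
{
	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);
		/* make the stop visible before re-reading tx_bd_cons;
		 * pairs with the smp_mb() in bnx2x_tx_int() */
		smp_mb();
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);	/* completion raced us */
	}
}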
159static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
160 u16 idx)
161{
162 u16 last_max = fp->last_max_sge;
163
164 if (SUB_S16(idx, last_max) > 0)
165 fp->last_max_sge = idx;
166}
167
168static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
169 struct eth_fast_path_rx_cqe *fp_cqe)
170{
171 struct bnx2x *bp = fp->bp;
172 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
173 le16_to_cpu(fp_cqe->len_on_bd)) >>
174 SGE_PAGE_SHIFT;
175 u16 last_max, last_elem, first_elem;
176 u16 delta = 0;
177 u16 i;
178
179 if (!sge_len)
180 return;
181
182 /* First mark all used pages */
183 for (i = 0; i < sge_len; i++)
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000184 SGE_MASK_CLEAR_BIT(fp,
185 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000186
187 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000188 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000189
190 /* Here we assume that the last SGE index is the biggest */
191 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000192 bnx2x_update_last_max_sge(fp,
193 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000194
195 last_max = RX_SGE(fp->last_max_sge);
196 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
197 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
198
199 /* If ring is not full */
200 if (last_elem + 1 != first_elem)
201 last_elem++;
202
203 /* Now update the prod */
204 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
205 if (likely(fp->sge_mask[i]))
206 break;
207
208 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
209 delta += RX_SGE_MASK_ELEM_SZ;
210 }
211
212 if (delta > 0) {
213 fp->rx_sge_prod += delta;
214 /* clear page-end entries */
215 bnx2x_clear_sge_mask_next_elems(fp);
216 }
217
218 DP(NETIF_MSG_RX_STATUS,
219 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
220 fp->last_max_sge, fp->rx_sge_prod);
221}
222
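/*
 * Illustrative sketch, not part of the driver: the SGE producer is only
 * advanced in whole mask-element chunks.  Each 64-bit element of sge_mask
 * tracks 64 SGEs; once every bit in an element has been cleared (all of
 * its pages were handed to the stack) the producer may jump over it and
 * the element is re-armed.  Simplified model with hypothetical names:
 */
static u16 sketch_advance_sge_prod(u64 *mask, u16 first_elem, u16 last_elem,
				   u16 nelems)
{
	u16 delta = 0, i;

	for (i = first_elem; i != last_elem; i = (i + 1) % nelems) {
		if (mask[i])		/* chunk still has live pages */
			break;
		mask[i] = ~0ULL;	/* re-arm the chunk */
		delta += 64;		/* producer may move one chunk */
	}
	return delta;
}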
223static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
224 struct sk_buff *skb, u16 cons, u16 prod)
225{
226 struct bnx2x *bp = fp->bp;
227 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
228 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
229 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
230 dma_addr_t mapping;
231
232 /* move empty skb from pool to prod and map it */
233 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800235 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237
238 /* move partial skb from cons to pool (don't unmap yet) */
239 fp->tpa_pool[queue] = *cons_rx_buf;
240
241 /* mark bin state as start - print error if current state != stop */
242 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
243 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
244
245 fp->tpa_state[queue] = BNX2X_TPA_START;
246
247 /* point prod_bd to new skb */
248 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
249 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
250
251#ifdef BNX2X_STOP_ON_ERROR
252 fp->tpa_queue_used |= (1 << queue);
253#ifdef _ASM_GENERIC_INT_L64_H
254 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
255#else
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
257#endif
258 fp->tpa_queue_used);
259#endif
260}
261
262static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
263 struct sk_buff *skb,
264 struct eth_fast_path_rx_cqe *fp_cqe,
265 u16 cqe_idx)
266{
267 struct sw_rx_page *rx_pg, old_rx_pg;
268 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
269 u32 i, frag_len, frag_size, pages;
270 int err;
271 int j;
272
273 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
274 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
275
276 /* This is needed in order to enable forwarding support */
277 if (frag_size)
278 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
279 max(frag_size, (u32)len_on_bd));
280
281#ifdef BNX2X_STOP_ON_ERROR
282 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
283 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
284 pages, cqe_idx);
285 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
286 fp_cqe->pkt_len, len_on_bd);
287 bnx2x_panic();
288 return -EINVAL;
289 }
290#endif
291
292 /* Run through the SGL and compose the fragmented skb */
293 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000294 u16 sge_idx =
295 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000296
297		/* FW gives the indices of the SGE as if the ring were an array
 298		   (meaning that a "next" element will consume 2 indices) */
299 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
300 rx_pg = &fp->rx_page_ring[sge_idx];
301 old_rx_pg = *rx_pg;
302
303 /* If we fail to allocate a substitute page, we simply stop
304 where we are and drop the whole packet */
305 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
306 if (unlikely(err)) {
307 fp->eth_q_stats.rx_skb_alloc_failed++;
308 return err;
309 }
310
311		/* Unmap the page as we are going to pass it to the stack */
312 dma_unmap_page(&bp->pdev->dev,
313 dma_unmap_addr(&old_rx_pg, mapping),
314 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
315
316 /* Add one frag and update the appropriate fields in the skb */
317 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
318
319 skb->data_len += frag_len;
320 skb->truesize += frag_len;
321 skb->len += frag_len;
322
323 frag_size -= frag_len;
324 }
325
326 return 0;
327}
328
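/*
 * Illustrative worked example, not part of the driver: how an aggregated
 * packet is split between the linear part and the SGE pages above.
 * Assuming a 4 KB SGE page, pkt_len = 26014 and len_on_bd = 1442:
 *   frag_size = 26014 - 1442 = 24572 bytes carried by the SGEs
 *   pages     = ALIGN(24572, 4096) / 4096 = 6 SGE entries
 *   gso_size  = min(4096, max(24572, 1442)) = 4096
 * The helper below just restates that arithmetic with hypothetical names.
 */
static inline u32 sketch_tpa_sge_pages(u32 pkt_len, u32 len_on_bd,
				       u32 sge_page_size)
{
	u32 frag_size = pkt_len - len_on_bd;	/* bytes beyond the first BD */

	return DIV_ROUND_UP(frag_size, sge_page_size);
}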
329static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
330 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
331 u16 cqe_idx)
332{
333 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
334 struct sk_buff *skb = rx_buf->skb;
335 /* alloc new skb */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800336 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000337
338 /* Unmap skb in the pool anyway, as we are going to change
339 pool entry status to BNX2X_TPA_STOP even if new skb allocation
340 fails. */
341 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800342 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000343
344 if (likely(new_skb)) {
345 /* fix ip xsum and give it to the stack */
346 /* (no need to map the new skb) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000347
348 prefetch(skb);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000349 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000350
351#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800352 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000353 BNX2X_ERR("skb_put is about to fail... "
354 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800355 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000356 bnx2x_panic();
357 return;
358 }
359#endif
360
361 skb_reserve(skb, pad);
362 skb_put(skb, len);
363
364 skb->protocol = eth_type_trans(skb, bp->dev);
365 skb->ip_summed = CHECKSUM_UNNECESSARY;
366
367 {
368 struct iphdr *iph;
369
370 iph = (struct iphdr *)skb->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000371 iph->check = 0;
372 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
373 }
374
375 if (!bnx2x_fill_frag_skb(bp, fp, skb,
376 &cqe->fast_path_cqe, cqe_idx)) {
Hao Zheng9bcc0892010-10-20 13:56:11 +0000377 if ((le16_to_cpu(cqe->fast_path_cqe.
378 pars_flags.flags) & PARSING_FLAGS_VLAN))
379 __vlan_hwaccel_put_tag(skb,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000380 le16_to_cpu(cqe->fast_path_cqe.
Hao Zheng9bcc0892010-10-20 13:56:11 +0000381 vlan_tag));
382 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000383 } else {
384 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
385 " - dropping packet!\n");
386 dev_kfree_skb(skb);
387 }
388
389
390 /* put new skb in bin */
391 fp->tpa_pool[queue].skb = new_skb;
392
393 } else {
394 /* else drop the packet and keep the buffer in the bin */
395 DP(NETIF_MSG_RX_STATUS,
396 "Failed to allocate new skb - dropping packet!\n");
397 fp->eth_q_stats.rx_skb_alloc_failed++;
398 }
399
400 fp->tpa_state[queue] = BNX2X_TPA_STOP;
401}
402
403/* Set Toeplitz hash value in the skb using the value from the
404 * CQE (calculated by HW).
405 */
406static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
407 struct sk_buff *skb)
408{
409 /* Set Toeplitz hash from CQE */
410 if ((bp->dev->features & NETIF_F_RXHASH) &&
411 (cqe->fast_path_cqe.status_flags &
412 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
413 skb->rxhash =
414 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
415}
416
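/*
 * Illustrative sketch, not part of the driver: the rxhash above is computed
 * by the chip, but this is the Toeplitz algorithm it implements (per the
 * RSS definition).  For every set bit of the input tuple the current
 * 32-bit window of the secret key is XORed into the result.  "key" must be
 * at least len + 4 bytes long; names are hypothetical.
 */
static u32 sketch_toeplitz_hash(const u8 *key, const u8 *input, int len)
{
	/* initial 32-bit window of the key */
	u32 window = (key[0] << 24) | (key[1] << 16) | (key[2] << 8) | key[3];
	u32 result = 0;
	int i, bit;

	for (i = 0; i < len; i++) {
		for (bit = 7; bit >= 0; bit--) {
			if (input[i] & (1 << bit))
				result ^= window;
			/* slide the key window left by one bit */
			window <<= 1;
			window |= (key[i + 4] >> bit) & 1;
		}
	}
	return result;
}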
417int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
418{
419 struct bnx2x *bp = fp->bp;
420 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
421 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
422 int rx_pkt = 0;
423
424#ifdef BNX2X_STOP_ON_ERROR
425 if (unlikely(bp->panic))
426 return 0;
427#endif
428
429	/* CQ "next element" is of the same size as a regular element,
 430	   that's why it's OK to handle it the same way here */
431 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
432 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
433 hw_comp_cons++;
434
435 bd_cons = fp->rx_bd_cons;
436 bd_prod = fp->rx_bd_prod;
437 bd_prod_fw = bd_prod;
438 sw_comp_cons = fp->rx_comp_cons;
439 sw_comp_prod = fp->rx_comp_prod;
440
441 /* Memory barrier necessary as speculative reads of the rx
442 * buffer can be ahead of the index in the status block
443 */
444 rmb();
445
446 DP(NETIF_MSG_RX_STATUS,
447 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
448 fp->index, hw_comp_cons, sw_comp_cons);
449
450 while (sw_comp_cons != hw_comp_cons) {
451 struct sw_rx_bd *rx_buf = NULL;
452 struct sk_buff *skb;
453 union eth_rx_cqe *cqe;
454 u8 cqe_fp_flags;
455 u16 len, pad;
456
457 comp_ring_cons = RCQ_BD(sw_comp_cons);
458 bd_prod = RX_BD(bd_prod);
459 bd_cons = RX_BD(bd_cons);
460
461 /* Prefetch the page containing the BD descriptor
462 at producer's index. It will be needed when new skb is
463 allocated */
464 prefetch((void *)(PAGE_ALIGN((unsigned long)
465 (&fp->rx_desc_ring[bd_prod])) -
466 PAGE_SIZE + 1));
467
468 cqe = &fp->rx_comp_ring[comp_ring_cons];
469 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
470
471 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
472 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
473 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
474 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
475 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
476 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
477
478 /* is this a slowpath msg? */
479 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
480 bnx2x_sp_event(fp, cqe);
481 goto next_cqe;
482
483 /* this is an rx packet */
484 } else {
485 rx_buf = &fp->rx_buf_ring[bd_cons];
486 skb = rx_buf->skb;
487 prefetch(skb);
488 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
489 pad = cqe->fast_path_cqe.placement_offset;
490
Vladislav Zolotarovfe78d262010-10-17 23:02:20 +0000491 /* - If CQE is marked both TPA_START and TPA_END it is
492 * a non-TPA CQE.
493			 * - FP CQE will always have TPA_START and/or
494 * TPA_STOP flags set.
495 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000496 if ((!fp->disable_tpa) &&
497 (TPA_TYPE(cqe_fp_flags) !=
498 (TPA_TYPE_START | TPA_TYPE_END))) {
499 u16 queue = cqe->fast_path_cqe.queue_index;
500
501 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
502 DP(NETIF_MSG_RX_STATUS,
503 "calling tpa_start on queue %d\n",
504 queue);
505
506 bnx2x_tpa_start(fp, queue, skb,
507 bd_cons, bd_prod);
508
509 /* Set Toeplitz hash for an LRO skb */
510 bnx2x_set_skb_rxhash(bp, cqe, skb);
511
512 goto next_rx;
Vladislav Zolotarovfe78d262010-10-17 23:02:20 +0000513 } else { /* TPA_STOP */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000514 DP(NETIF_MSG_RX_STATUS,
515 "calling tpa_stop on queue %d\n",
516 queue);
517
518 if (!BNX2X_RX_SUM_FIX(cqe))
519					BNX2X_ERR("STOP on non-TCP "
520 "data\n");
521
522				/* This is the size of the linear data
523 on this skb */
524 len = le16_to_cpu(cqe->fast_path_cqe.
525 len_on_bd);
526 bnx2x_tpa_stop(bp, fp, queue, pad,
527 len, cqe, comp_ring_cons);
528#ifdef BNX2X_STOP_ON_ERROR
529 if (bp->panic)
530 return 0;
531#endif
532
533 bnx2x_update_sge_prod(fp,
534 &cqe->fast_path_cqe);
535 goto next_cqe;
536 }
537 }
538
539 dma_sync_single_for_device(&bp->pdev->dev,
540 dma_unmap_addr(rx_buf, mapping),
541 pad + RX_COPY_THRESH,
542 DMA_FROM_DEVICE);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000543 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000544
545 /* is this an error packet? */
546 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
547 DP(NETIF_MSG_RX_ERR,
548 "ERROR flags %x rx packet %u\n",
549 cqe_fp_flags, sw_comp_cons);
550 fp->eth_q_stats.rx_err_discard_pkt++;
551 goto reuse_rx;
552 }
553
554 /* Since we don't have a jumbo ring
555 * copy small packets if mtu > 1500
556 */
557 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
558 (len <= RX_COPY_THRESH)) {
559 struct sk_buff *new_skb;
560
561 new_skb = netdev_alloc_skb(bp->dev,
562 len + pad);
563 if (new_skb == NULL) {
564 DP(NETIF_MSG_RX_ERR,
565 "ERROR packet dropped "
566 "because of alloc failure\n");
567 fp->eth_q_stats.rx_skb_alloc_failed++;
568 goto reuse_rx;
569 }
570
571 /* aligned copy */
572 skb_copy_from_linear_data_offset(skb, pad,
573 new_skb->data + pad, len);
574 skb_reserve(new_skb, pad);
575 skb_put(new_skb, len);
576
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000577 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000578
579 skb = new_skb;
580
581 } else
582 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
583 dma_unmap_single(&bp->pdev->dev,
584 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800585 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000586 DMA_FROM_DEVICE);
587 skb_reserve(skb, pad);
588 skb_put(skb, len);
589
590 } else {
591 DP(NETIF_MSG_RX_ERR,
592 "ERROR packet dropped because "
593 "of alloc failure\n");
594 fp->eth_q_stats.rx_skb_alloc_failed++;
595reuse_rx:
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000596 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000597 goto next_rx;
598 }
599
600 skb->protocol = eth_type_trans(skb, bp->dev);
601
602			/* Set Toeplitz hash for a non-LRO skb */
603 bnx2x_set_skb_rxhash(bp, cqe, skb);
604
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700605 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000606
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000607 if (bp->rx_csum) {
608 if (likely(BNX2X_RX_CSUM_OK(cqe)))
609 skb->ip_summed = CHECKSUM_UNNECESSARY;
610 else
611 fp->eth_q_stats.hw_csum_err++;
612 }
613 }
614
615 skb_record_rx_queue(skb, fp->index);
616
Hao Zheng9bcc0892010-10-20 13:56:11 +0000617 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
618 PARSING_FLAGS_VLAN)
619 __vlan_hwaccel_put_tag(skb,
620 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
621 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000622
623
624next_rx:
625 rx_buf->skb = NULL;
626
627 bd_cons = NEXT_RX_IDX(bd_cons);
628 bd_prod = NEXT_RX_IDX(bd_prod);
629 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
630 rx_pkt++;
631next_cqe:
632 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
633 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
634
635 if (rx_pkt == budget)
636 break;
637 } /* while */
638
639 fp->rx_bd_cons = bd_cons;
640 fp->rx_bd_prod = bd_prod_fw;
641 fp->rx_comp_cons = sw_comp_cons;
642 fp->rx_comp_prod = sw_comp_prod;
643
644 /* Update producers */
645 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
646 fp->rx_sge_prod);
647
648 fp->rx_pkt += rx_pkt;
649 fp->rx_calls++;
650
651 return rx_pkt;
652}
653
654static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
655{
656 struct bnx2x_fastpath *fp = fp_cookie;
657 struct bnx2x *bp = fp->bp;
658
659 /* Return here if interrupt is disabled */
660 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
661 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
662 return IRQ_HANDLED;
663 }
664
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000665 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
666 "[fp %d fw_sd %d igusb %d]\n",
667 fp->index, fp->fw_sb_id, fp->igu_sb_id);
668 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000669
670#ifdef BNX2X_STOP_ON_ERROR
671 if (unlikely(bp->panic))
672 return IRQ_HANDLED;
673#endif
674
675 /* Handle Rx and Tx according to MSI-X vector */
676 prefetch(fp->rx_cons_sb);
677 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000678 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000679 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
680
681 return IRQ_HANDLED;
682}
683
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000684/* HW Lock for shared dual port PHYs */
685void bnx2x_acquire_phy_lock(struct bnx2x *bp)
686{
687 mutex_lock(&bp->port.phy_mutex);
688
689 if (bp->port.need_hw_lock)
690 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
691}
692
693void bnx2x_release_phy_lock(struct bnx2x *bp)
694{
695 if (bp->port.need_hw_lock)
696 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
697
698 mutex_unlock(&bp->port.phy_mutex);
699}
700
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800701/* calculates MF speed according to current linespeed and MF configuration */
702u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{
704 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) {
706 u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
707 FUNC_MF_CFG_MAX_BW_MASK) >>
708 FUNC_MF_CFG_MAX_BW_SHIFT;
709 /* Calculate the current MAX line speed limit for the DCC
710 * capable devices
711 */
712 if (IS_MF_SD(bp)) {
713 u16 vn_max_rate = maxCfg * 100;
714
715 if (vn_max_rate < line_speed)
716 line_speed = vn_max_rate;
717 } else /* IS_MF_SI(bp)) */
718 line_speed = (line_speed * maxCfg) / 100;
719 }
720
721 return line_speed;
722}
723
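/*
 * Illustrative worked example, not part of the driver: with a 10000 Mbps
 * physical link and maxCfg = 30 (the MAX_BW field is in units of 100 Mbps
 * in SD mode and a percentage in SI mode), both modes report 3000 Mbps:
 *   SD: vn_max_rate = 30 * 100 = 3000 < 10000  ->  3000
 *   SI: 10000 * 30 / 100                       ->  3000
 * Hypothetical helper restating the calculation:
 */
static inline u16 sketch_mf_speed(u16 line_speed, u16 max_cfg, bool sd_mode)
{
	if (sd_mode)	/* absolute cap in 100 Mbps units */
		return min_t(u16, line_speed, max_cfg * 100);
	return line_speed * max_cfg / 100;	/* percentage of line speed */
}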
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000724void bnx2x_link_report(struct bnx2x *bp)
725{
726 if (bp->flags & MF_FUNC_DIS) {
727 netif_carrier_off(bp->dev);
728 netdev_err(bp->dev, "NIC Link is Down\n");
729 return;
730 }
731
732 if (bp->link_vars.link_up) {
733 u16 line_speed;
734
735 if (bp->state == BNX2X_STATE_OPEN)
736 netif_carrier_on(bp->dev);
737 netdev_info(bp->dev, "NIC Link is Up, ");
738
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800739 line_speed = bnx2x_get_mf_speed(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000740
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000741 pr_cont("%d Mbps ", line_speed);
742
743 if (bp->link_vars.duplex == DUPLEX_FULL)
744 pr_cont("full duplex");
745 else
746 pr_cont("half duplex");
747
748 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
749 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
750 pr_cont(", receive ");
751 if (bp->link_vars.flow_ctrl &
752 BNX2X_FLOW_CTRL_TX)
753 pr_cont("& transmit ");
754 } else {
755 pr_cont(", transmit ");
756 }
757 pr_cont("flow control ON");
758 }
759 pr_cont("\n");
760
761 } else { /* link_down */
762 netif_carrier_off(bp->dev);
763 netdev_err(bp->dev, "NIC Link is Down\n");
764 }
765}
766
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000767/* Returns the number of actually allocated BDs */
768static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
769 int rx_ring_size)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000770{
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000771 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000772 u16 ring_prod, cqe_ring_prod;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000773 int i;
774
775 fp->rx_comp_cons = 0;
776 cqe_ring_prod = ring_prod = 0;
777 for (i = 0; i < rx_ring_size; i++) {
778 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
779 BNX2X_ERR("was only able to allocate "
780 "%d rx skbs on queue[%d]\n", i, fp->index);
781 fp->eth_q_stats.rx_skb_alloc_failed++;
782 break;
783 }
784 ring_prod = NEXT_RX_IDX(ring_prod);
785 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
786 WARN_ON(ring_prod <= i);
787 }
788
789 fp->rx_bd_prod = ring_prod;
790 /* Limit the CQE producer by the CQE ring size */
791 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
792 cqe_ring_prod);
793 fp->rx_pkt = fp->rx_calls = 0;
794
795 return i;
796}
797
798static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
799{
800 struct bnx2x *bp = fp->bp;
Dmitry Kravkov25141582010-09-12 05:48:28 +0000801 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
802 MAX_RX_AVAIL/bp->num_queues;
803
804 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000805
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000806 bnx2x_alloc_rx_bds(fp, rx_ring_size);
807
808 /* Warning!
809	 * this will generate an interrupt (to the TSTORM);
 810	 * it must only be done after the chip is initialized
811 */
812 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
813 fp->rx_sge_prod);
814}
815
816void bnx2x_init_rx_rings(struct bnx2x *bp)
817{
818 int func = BP_FUNC(bp);
819 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
820 ETH_MAX_AGGREGATION_QUEUES_E1H;
821 u16 ring_prod;
822 int i, j;
823
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000824 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000825 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000826
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800827 DP(NETIF_MSG_IFUP,
828 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
829
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000830 if (!fp->disable_tpa) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000831 for (i = 0; i < max_agg_queues; i++) {
832 fp->tpa_pool[i].skb =
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800833 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000834 if (!fp->tpa_pool[i].skb) {
835 BNX2X_ERR("Failed to allocate TPA "
836 "skb pool for queue[%d] - "
837 "disabling TPA on this "
838 "queue!\n", j);
839 bnx2x_free_tpa_pool(bp, fp, i);
840 fp->disable_tpa = 1;
841 break;
842 }
843 dma_unmap_addr_set((struct sw_rx_bd *)
844 &bp->fp->tpa_pool[i],
845 mapping, 0);
846 fp->tpa_state[i] = BNX2X_TPA_STOP;
847 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000848
849 /* "next page" elements initialization */
850 bnx2x_set_next_page_sgl(fp);
851
852 /* set SGEs bit mask */
853 bnx2x_init_sge_ring_bit_mask(fp);
854
855 /* Allocate SGEs and initialize the ring elements */
856 for (i = 0, ring_prod = 0;
857 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
858
859 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
860 BNX2X_ERR("was only able to allocate "
861 "%d rx sges\n", i);
862 BNX2X_ERR("disabling TPA for"
863 " queue[%d]\n", j);
864 /* Cleanup already allocated elements */
865 bnx2x_free_rx_sge_range(bp,
866 fp, ring_prod);
867 bnx2x_free_tpa_pool(bp,
868 fp, max_agg_queues);
869 fp->disable_tpa = 1;
870 ring_prod = 0;
871 break;
872 }
873 ring_prod = NEXT_SGE_IDX(ring_prod);
874 }
875
876 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000877 }
878 }
879
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000880 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000881 struct bnx2x_fastpath *fp = &bp->fp[j];
882
883 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000884
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000885 bnx2x_set_next_page_rx_bd(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000886
887 /* CQ ring */
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000888 bnx2x_set_next_page_rx_cq(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000889
890 /* Allocate BDs and initialize BD ring */
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000891 bnx2x_alloc_rx_bd_ring(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000892
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000893 if (j != 0)
894 continue;
895
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000896 if (!CHIP_IS_E2(bp)) {
897 REG_WR(bp, BAR_USTRORM_INTMEM +
898 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
899 U64_LO(fp->rx_comp_mapping));
900 REG_WR(bp, BAR_USTRORM_INTMEM +
901 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
902 U64_HI(fp->rx_comp_mapping));
903 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000904 }
905}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000906
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000907static void bnx2x_free_tx_skbs(struct bnx2x *bp)
908{
909 int i;
910
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000911 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000912 struct bnx2x_fastpath *fp = &bp->fp[i];
913
914 u16 bd_cons = fp->tx_bd_cons;
915 u16 sw_prod = fp->tx_pkt_prod;
916 u16 sw_cons = fp->tx_pkt_cons;
917
918 while (sw_cons != sw_prod) {
919 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
920 sw_cons++;
921 }
922 }
923}
924
925static void bnx2x_free_rx_skbs(struct bnx2x *bp)
926{
927 int i, j;
928
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000929 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000930 struct bnx2x_fastpath *fp = &bp->fp[j];
931
932 for (i = 0; i < NUM_RX_BD; i++) {
933 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
934 struct sk_buff *skb = rx_buf->skb;
935
936 if (skb == NULL)
937 continue;
938
939 dma_unmap_single(&bp->pdev->dev,
940 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800941 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000942
943 rx_buf->skb = NULL;
944 dev_kfree_skb(skb);
945 }
946 if (!fp->disable_tpa)
947 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
948 ETH_MAX_AGGREGATION_QUEUES_E1 :
949 ETH_MAX_AGGREGATION_QUEUES_E1H);
950 }
951}
952
953void bnx2x_free_skbs(struct bnx2x *bp)
954{
955 bnx2x_free_tx_skbs(bp);
956 bnx2x_free_rx_skbs(bp);
957}
958
959static void bnx2x_free_msix_irqs(struct bnx2x *bp)
960{
961 int i, offset = 1;
962
963 free_irq(bp->msix_table[0].vector, bp->dev);
964 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
965 bp->msix_table[0].vector);
966
967#ifdef BCM_CNIC
968 offset++;
969#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000970 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000971 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
972 "state %x\n", i, bp->msix_table[i + offset].vector,
973 bnx2x_fp(bp, i, state));
974
975 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
976 }
977}
978
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000979void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000980{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000981 if (bp->flags & USING_MSIX_FLAG)
982 bnx2x_free_msix_irqs(bp);
983 else if (bp->flags & USING_MSI_FLAG)
984 free_irq(bp->pdev->irq, bp->dev);
985 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000986 free_irq(bp->pdev->irq, bp->dev);
987}
988
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000989int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000990{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000991 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000992
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000993 bp->msix_table[msix_vec].entry = msix_vec;
994 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
995 bp->msix_table[0].entry);
996 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000997
998#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +0000999 bp->msix_table[msix_vec].entry = msix_vec;
1000 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1001 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1002 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001003#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001004 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001005 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001006 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001007 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1008 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001009 }
1010
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001011 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001012
1013 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001014
1015 /*
1016 * reconfigure number of tx/rx queues according to available
1017 * MSI-X vectors
1018 */
1019 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001020		/* how many fewer vectors will we have? */
1021 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001022
1023 DP(NETIF_MSG_IFUP,
1024 "Trying to use less MSI-X vectors: %d\n", rc);
1025
1026 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1027
1028 if (rc) {
1029 DP(NETIF_MSG_IFUP,
1030 "MSI-X is not attainable rc %d\n", rc);
1031 return rc;
1032 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001033 /*
1034 * decrease number of queues by number of unallocated entries
1035 */
1036 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001037
1038 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1039 bp->num_queues);
1040 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001041		/* fall back to INTx if there is not enough memory */
1042 if (rc == -ENOMEM)
1043 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001044 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1045 return rc;
1046 }
1047
1048 bp->flags |= USING_MSIX_FLAG;
1049
1050 return 0;
1051}
1052
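/*
 * Illustrative sketch, not part of the driver: the retry pattern used
 * above.  With the old pci_enable_msix() API a positive return value means
 * "only this many vectors are available", so the caller shrinks its
 * request and tries again before giving up (and possibly falling back to
 * MSI/INTx).  Hypothetical helper:
 */
static int sketch_enable_msix(struct pci_dev *pdev, struct msix_entry *tbl,
			      int wanted, int minimum)
{
	int rc = pci_enable_msix(pdev, tbl, wanted);

	if (rc > 0) {
		if (rc < minimum)
			return -ENOSPC;	/* too few even for the minimum */
		/* fewer than wanted, but enough: retry with what we can get */
		rc = pci_enable_msix(pdev, tbl, rc);
	}
	return rc;	/* 0 on success, negative error otherwise */
}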
1053static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1054{
1055 int i, rc, offset = 1;
1056
1057 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1058 bp->dev->name, bp->dev);
1059 if (rc) {
1060 BNX2X_ERR("request sp irq failed\n");
1061 return -EBUSY;
1062 }
1063
1064#ifdef BCM_CNIC
1065 offset++;
1066#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001067 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001068 struct bnx2x_fastpath *fp = &bp->fp[i];
1069 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1070 bp->dev->name, i);
1071
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001072 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001073 bnx2x_msix_fp_int, 0, fp->name, fp);
1074 if (rc) {
1075 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1076 bnx2x_free_msix_irqs(bp);
1077 return -EBUSY;
1078 }
1079
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001080 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001081 fp->state = BNX2X_FP_STATE_IRQ;
1082 }
1083
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001084 i = BNX2X_NUM_ETH_QUEUES(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001085 offset = 1 + CNIC_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001086 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1087 " ... fp[%d] %d\n",
1088 bp->msix_table[0].vector,
1089 0, bp->msix_table[offset].vector,
1090 i - 1, bp->msix_table[offset + i - 1].vector);
1091
1092 return 0;
1093}
1094
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001095int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001096{
1097 int rc;
1098
1099 rc = pci_enable_msi(bp->pdev);
1100 if (rc) {
1101 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1102 return -1;
1103 }
1104 bp->flags |= USING_MSI_FLAG;
1105
1106 return 0;
1107}
1108
1109static int bnx2x_req_irq(struct bnx2x *bp)
1110{
1111 unsigned long flags;
1112 int rc;
1113
1114 if (bp->flags & USING_MSI_FLAG)
1115 flags = 0;
1116 else
1117 flags = IRQF_SHARED;
1118
1119 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1120 bp->dev->name, bp->dev);
1121 if (!rc)
1122 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1123
1124 return rc;
1125}
1126
1127static void bnx2x_napi_enable(struct bnx2x *bp)
1128{
1129 int i;
1130
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001131 for_each_napi_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001132 napi_enable(&bnx2x_fp(bp, i, napi));
1133}
1134
1135static void bnx2x_napi_disable(struct bnx2x *bp)
1136{
1137 int i;
1138
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001139 for_each_napi_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001140 napi_disable(&bnx2x_fp(bp, i, napi));
1141}
1142
1143void bnx2x_netif_start(struct bnx2x *bp)
1144{
1145 int intr_sem;
1146
1147 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1148 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1149
1150 if (intr_sem) {
1151 if (netif_running(bp->dev)) {
1152 bnx2x_napi_enable(bp);
1153 bnx2x_int_enable(bp);
1154 if (bp->state == BNX2X_STATE_OPEN)
1155 netif_tx_wake_all_queues(bp->dev);
1156 }
1157 }
1158}
1159
1160void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1161{
1162 bnx2x_int_disable_sync(bp, disable_hw);
1163 bnx2x_napi_disable(bp);
1164 netif_tx_disable(bp->dev);
1165}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001166
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001167u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1168{
1169#ifdef BCM_CNIC
1170 struct bnx2x *bp = netdev_priv(dev);
1171 if (NO_FCOE(bp))
1172 return skb_tx_hash(dev, skb);
1173 else {
1174 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1175 u16 ether_type = ntohs(hdr->h_proto);
1176
1177 /* Skip VLAN tag if present */
1178 if (ether_type == ETH_P_8021Q) {
1179 struct vlan_ethhdr *vhdr =
1180 (struct vlan_ethhdr *)skb->data;
1181
1182 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1183 }
1184
1185 /* If ethertype is FCoE or FIP - use FCoE ring */
1186 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1187 return bnx2x_fcoe(bp, index);
1188 }
1189#endif
1190	/* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1191 */
1192 return __skb_tx_hash(dev, skb,
1193 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1194}
1195
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001196void bnx2x_set_num_queues(struct bnx2x *bp)
1197{
1198 switch (bp->multi_mode) {
1199 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001200 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001201 break;
1202 case ETH_RSS_MODE_REGULAR:
1203 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001204 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001205
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001206 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001207 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001208 break;
1209 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001210
1211 /* Add special queues */
1212 bp->num_queues += NONE_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001213}
1214
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001215#ifdef BCM_CNIC
1216static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1217{
1218 if (!NO_FCOE(bp)) {
1219 if (!IS_MF_SD(bp))
1220 bnx2x_set_fip_eth_mac_addr(bp, 1);
1221 bnx2x_set_all_enode_macs(bp, 1);
1222 bp->flags |= FCOE_MACS_SET;
1223 }
1224}
1225#endif
1226
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001227static void bnx2x_release_firmware(struct bnx2x *bp)
1228{
1229 kfree(bp->init_ops_offsets);
1230 kfree(bp->init_ops);
1231 kfree(bp->init_data);
1232 release_firmware(bp->firmware);
1233}
1234
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001235static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1236{
1237 int rc, num = bp->num_queues;
1238
1239#ifdef BCM_CNIC
1240 if (NO_FCOE(bp))
1241 num -= FCOE_CONTEXT_USE;
1242
1243#endif
1244 netif_set_real_num_tx_queues(bp->dev, num);
1245 rc = netif_set_real_num_rx_queues(bp->dev, num);
1246 return rc;
1247}
1248
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001249static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1250{
1251 int i;
1252
1253 for_each_queue(bp, i) {
1254 struct bnx2x_fastpath *fp = &bp->fp[i];
1255
1256 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1257 if (IS_FCOE_IDX(i))
1258 /*
1259			 * Although there are no IP frames expected to arrive on
1260 * this ring we still want to add an
1261 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1262 * overrun attack.
1263 */
1264 fp->rx_buf_size =
1265 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1266 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1267 else
1268 fp->rx_buf_size =
1269 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1270 IP_HEADER_ALIGNMENT_PADDING;
1271 }
1272}
1273
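/*
 * Illustrative sketch, not part of the driver: the rx_buf_size computed
 * above must be used consistently when an RX buffer is allocated,
 * DMA-mapped and later unmapped, which is exactly what the fast path does.
 * Hypothetical helper built from the driver's own calls:
 */
static inline struct sk_buff *sketch_alloc_rx_buf(struct bnx2x *bp,
						  struct bnx2x_fastpath *fp,
						  dma_addr_t *mapping)
{
	struct sk_buff *skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);

	if (!skb)
		return NULL;

	*mapping = dma_map_single(&bp->pdev->dev, skb->data,
				  fp->rx_buf_size, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, *mapping))) {
		dev_kfree_skb(skb);
		return NULL;
	}
	return skb;
}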
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001274/* must be called with rtnl_lock */
1275int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1276{
1277 u32 load_code;
1278 int i, rc;
1279
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001280 /* Set init arrays */
1281 rc = bnx2x_init_firmware(bp);
1282 if (rc) {
1283 BNX2X_ERR("Error loading firmware\n");
1284 return rc;
1285 }
1286
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001287#ifdef BNX2X_STOP_ON_ERROR
1288 if (unlikely(bp->panic))
1289 return -EPERM;
1290#endif
1291
1292 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1293
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001294 /* must be called before memory allocation and HW init */
1295 bnx2x_ilt_set_info(bp);
1296
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001297 /* Set the receive queues buffer size */
1298 bnx2x_set_rx_buf_size(bp);
1299
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001300 if (bnx2x_alloc_mem(bp))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001301 return -ENOMEM;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001302
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001303 rc = bnx2x_set_real_num_queues(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001304 if (rc) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001305 BNX2X_ERR("Unable to set real_num_queues\n");
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001306 goto load_error0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001307 }
1308
1309 for_each_queue(bp, i)
1310 bnx2x_fp(bp, i, disable_tpa) =
1311 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1312
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001313#ifdef BCM_CNIC
1314 /* We don't want TPA on FCoE L2 ring */
1315 bnx2x_fcoe(bp, disable_tpa) = 1;
1316#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001317 bnx2x_napi_enable(bp);
1318
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001319	/* Send the LOAD_REQUEST command to the MCP.
 1320	   The response indicates the type of LOAD command:
 1321	   if this is the first port to be initialized,
 1322	   the common blocks should be initialized as well; otherwise not.
 1323	   */
1324 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001325 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001326 if (!load_code) {
1327 BNX2X_ERR("MCP response failure, aborting\n");
1328 rc = -EBUSY;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001329 goto load_error1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001330 }
1331 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1332 rc = -EBUSY; /* other port in diagnostic mode */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001333 goto load_error1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001334 }
1335
1336 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001337 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001338 int port = BP_PORT(bp);
1339
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001340 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1341 path, load_count[path][0], load_count[path][1],
1342 load_count[path][2]);
1343 load_count[path][0]++;
1344 load_count[path][1 + port]++;
1345 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1346 path, load_count[path][0], load_count[path][1],
1347 load_count[path][2]);
1348 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001349 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001350 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001351 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1352 else
1353 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1354 }
1355
1356 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001357 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001358 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1359 bp->port.pmf = 1;
1360 else
1361 bp->port.pmf = 0;
1362 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1363
1364 /* Initialize HW */
1365 rc = bnx2x_init_hw(bp, load_code);
1366 if (rc) {
1367 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001368 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001369 goto load_error2;
1370 }
1371
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001372 /* Connect to IRQs */
1373 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001374 if (rc) {
1375 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1376 goto load_error2;
1377 }
1378
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001379 /* Setup NIC internals and enable interrupts */
1380 bnx2x_nic_init(bp, load_code);
1381
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001382 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1383 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001384 (bp->common.shmem2_base))
1385 SHMEM2_WR(bp, dcc_support,
1386 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1387 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1388
1389 /* Send LOAD_DONE command to MCP */
1390 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001391 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001392 if (!load_code) {
1393 BNX2X_ERR("MCP response failure, aborting\n");
1394 rc = -EBUSY;
1395 goto load_error3;
1396 }
1397 }
1398
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00001399 bnx2x_dcbx_init(bp);
1400
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001401 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1402
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001403 rc = bnx2x_func_start(bp);
1404 if (rc) {
1405 BNX2X_ERR("Function start failed!\n");
1406#ifndef BNX2X_STOP_ON_ERROR
1407 goto load_error3;
1408#else
1409 bp->panic = 1;
1410 return -EBUSY;
1411#endif
1412 }
1413
1414 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001415 if (rc) {
1416 BNX2X_ERR("Setup leading failed!\n");
1417#ifndef BNX2X_STOP_ON_ERROR
1418 goto load_error3;
1419#else
1420 bp->panic = 1;
1421 return -EBUSY;
1422#endif
1423 }
1424
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001425 if (!CHIP_IS_E1(bp) &&
1426 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1427 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1428 bp->flags |= MF_FUNC_DIS;
1429 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001430
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001431#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001432 /* Enable Timer scan */
1433 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001434#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001435
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001436 for_each_nondefault_queue(bp, i) {
1437 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1438 if (rc)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001439#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001440 goto load_error4;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001441#else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001442 goto load_error3;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001443#endif
1444 }
1445
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001446	/* Now that the clients are configured, we are ready to work */
1447 bp->state = BNX2X_STATE_OPEN;
1448
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001449#ifdef BCM_CNIC
1450 bnx2x_set_fcoe_eth_macs(bp);
1451#endif
1452
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001453 bnx2x_set_eth_mac(bp, 1);
1454
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001455 /* Clear MC configuration */
1456 if (CHIP_IS_E1(bp))
1457 bnx2x_invalidate_e1_mc_list(bp);
1458 else
1459 bnx2x_invalidate_e1h_mc_list(bp);
1460
1461 /* Clear UC lists configuration */
1462 bnx2x_invalidate_uc_list(bp);
1463
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001464 if (bp->port.pmf)
1465 bnx2x_initial_phy_init(bp, load_mode);
1466
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001467 /* Initialize Rx filtering */
1468 bnx2x_set_rx_mode(bp->dev);
1469
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001470 /* Start fast path */
1471 switch (load_mode) {
1472 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001473		/* Tx queues only need to be re-enabled */
1474 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001475 /* Initialize the receive filter. */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001476 break;
1477
1478 case LOAD_OPEN:
1479 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001480 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001481 break;
1482
1483 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001484 bp->state = BNX2X_STATE_DIAG;
1485 break;
1486
1487 default:
1488 break;
1489 }
1490
1491 if (!bp->port.pmf)
1492 bnx2x__link_status_update(bp);
1493
1494 /* start the timer */
1495 mod_timer(&bp->timer, jiffies + bp->current_interval);
1496
1497#ifdef BCM_CNIC
1498 bnx2x_setup_cnic_irq_info(bp);
1499 if (bp->state == BNX2X_STATE_OPEN)
1500 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1501#endif
1502 bnx2x_inc_load_cnt(bp);
1503
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001504 bnx2x_release_firmware(bp);
1505
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001506 return 0;
1507
1508#ifdef BCM_CNIC
1509load_error4:
1510 /* Disable Timer scan */
1511 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1512#endif
1513load_error3:
1514 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001515
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001516 /* Free SKBs, SGEs, TPA pool and driver internals */
1517 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001518 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001519 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001520
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001521 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001522 bnx2x_free_irq(bp);
1523load_error2:
1524 if (!BP_NOMCP(bp)) {
1525 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1526 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1527 }
1528
1529 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001530load_error1:
1531 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001532load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001533 bnx2x_free_mem(bp);
1534
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001535 bnx2x_release_firmware(bp);
1536
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001537 return rc;
1538}
1539
1540/* must be called with rtnl_lock */
1541int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1542{
1543 int i;
1544
1545 if (bp->state == BNX2X_STATE_CLOSED) {
1546 /* Interface has been removed - nothing to recover */
1547 bp->recovery_state = BNX2X_RECOVERY_DONE;
1548 bp->is_leader = 0;
1549 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1550 smp_wmb();
1551
1552 return -EINVAL;
1553 }
1554
1555#ifdef BCM_CNIC
1556 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1557#endif
1558 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1559
1560 /* Set "drop all" */
1561 bp->rx_mode = BNX2X_RX_MODE_NONE;
1562 bnx2x_set_storm_rx_mode(bp);
1563
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001564 /* Stop Tx */
1565 bnx2x_tx_disable(bp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001566
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001567 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001568
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001569 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001570 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001571
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001572 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001573
1574 /* Cleanup the chip if needed */
1575 if (unload_mode != UNLOAD_RECOVERY)
1576 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001577 else {
1578 /* Disable HW interrupts, NAPI and Tx */
1579 bnx2x_netif_stop(bp, 1);
1580
1581 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001582 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001583 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001584
1585 bp->port.pmf = 0;
1586
1587 /* Free SKBs, SGEs, TPA pool and driver internals */
1588 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001589 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001590 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001591
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001592 bnx2x_free_mem(bp);
1593
1594 bp->state = BNX2X_STATE_CLOSED;
1595
1596 /* The last driver must disable a "close the gate" if there is no
1597 * parity attention or "process kill" pending.
1598 */
1599 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1600 bnx2x_reset_is_done(bp))
1601 bnx2x_disable_close_the_gate(bp);
1602
1603	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
1604 if (unload_mode == UNLOAD_RECOVERY)
1605 bp->fw_seq = 0;
1606
1607 return 0;
1608}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001609
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001610int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1611{
1612 u16 pmcsr;
1613
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00001614 /* If there is no power capability, silently succeed */
1615 if (!bp->pm_cap) {
1616 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1617 return 0;
1618 }
1619
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001620 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1621
1622 switch (state) {
1623 case PCI_D0:
1624 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1625 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1626 PCI_PM_CTRL_PME_STATUS));
1627
1628 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1629 /* delay required during transition out of D3hot */
1630 msleep(20);
1631 break;
1632
1633 case PCI_D3hot:
1634		/* If there are other active clients above us, don't
 1635		   shut down the power */
1636 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1637 return 0;
1638 /* Don't shut down the power for emulation and FPGA */
1639 if (CHIP_REV_IS_SLOW(bp))
1640 return 0;
1641
1642 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
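		/* Set the PMCSR power state field to 3 (D3hot) */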
1643 pmcsr |= 3;
1644
1645 if (bp->wol)
1646 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1647
1648 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1649 pmcsr);
1650
1651 /* No more memory access after this point until
1652 * device is brought back to D0.
1653 */
1654 break;
1655
1656 default:
1657 return -EINVAL;
1658 }
1659 return 0;
1660}
1661
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001662/*
1663 * net_device service functions
1664 */
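/* NAPI poll handler for one fastpath ring: services Tx completions and
 * Rx work within the budget, and completes NAPI and re-enables the IGU
 * interrupt once no more work is pending.
 */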
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001665int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001666{
1667 int work_done = 0;
1668 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1669 napi);
1670 struct bnx2x *bp = fp->bp;
1671
1672 while (1) {
1673#ifdef BNX2X_STOP_ON_ERROR
1674 if (unlikely(bp->panic)) {
1675 napi_complete(napi);
1676 return 0;
1677 }
1678#endif
1679
1680 if (bnx2x_has_tx_work(fp))
1681 bnx2x_tx_int(fp);
1682
1683 if (bnx2x_has_rx_work(fp)) {
1684 work_done += bnx2x_rx_int(fp, budget - work_done);
1685
1686 /* must not complete if we consumed full budget */
1687 if (work_done >= budget)
1688 break;
1689 }
1690
1691 /* Fall out from the NAPI loop if needed */
1692 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001693#ifdef BCM_CNIC
1694 /* No need to update SB for FCoE L2 ring as long as
1695 * it's connected to the default SB and the SB
1696 * has been updated when NAPI was scheduled.
1697 */
1698 if (IS_FCOE_FP(fp)) {
1699 napi_complete(napi);
1700 break;
1701 }
1702#endif
1703
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001704 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001705 /* bnx2x_has_rx_work() reads the status block,
1706 * thus we need to ensure that status block indices
1707 * have been actually read (bnx2x_update_fpsb_idx)
1708 * prior to this check (bnx2x_has_rx_work) so that
1709 * we won't write the "newer" value of the status block
1710 * to IGU (if there was a DMA right after
1711 * bnx2x_has_rx_work and if there is no rmb, the memory
1712 * reading (bnx2x_update_fpsb_idx) may be postponed
1713 * to right before bnx2x_ack_sb). In this case there
1714 * will never be another interrupt until there is
1715 * another update of the status block, while there
1716 * is still unhandled work.
1717 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001718 rmb();
1719
1720 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1721 napi_complete(napi);
1722 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001723 DP(NETIF_MSG_HW,
1724 "Update index to %d\n", fp->fp_hc_idx);
1725 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1726 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001727 IGU_INT_ENABLE, 1);
1728 break;
1729 }
1730 }
1731 }
1732
1733 return work_done;
1734}
1735
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001736/* We split the first BD into a headers BD and a data BD
1737 * to ease the pain of our fellow microcode engineers;
1738 * we use one DMA mapping for both BDs.
1739 * So far this has only been observed to happen
1740 * in Other Operating Systems(TM).
1741 */
1742static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1743 struct bnx2x_fastpath *fp,
1744 struct sw_tx_bd *tx_buf,
1745 struct eth_tx_start_bd **tx_bd, u16 hlen,
1746 u16 bd_prod, int nbd)
1747{
1748 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1749 struct eth_tx_bd *d_tx_bd;
1750 dma_addr_t mapping;
1751 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1752
1753 /* first fix first BD */
1754 h_tx_bd->nbd = cpu_to_le16(nbd);
1755 h_tx_bd->nbytes = cpu_to_le16(hlen);
1756
1757 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1758 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1759 h_tx_bd->addr_lo, h_tx_bd->nbd);
1760
1761 /* now get a new data BD
1762 * (after the pbd) and fill it */
1763 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1764 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1765
1766 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1767 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1768
1769 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1770 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1771 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1772
1773 /* this marks the BD as one that has no individual mapping */
1774 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1775
1776 DP(NETIF_MSG_TX_QUEUED,
1777 "TSO split data size is %d (%x:%x)\n",
1778 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1779
1780 /* update tx_bd */
1781 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1782
1783 return bd_prod;
1784}
1785
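/* Adjust the HW-computed checksum when the checksum start offset differs
 * from the transport header by 'fix' bytes: add or subtract the partial
 * checksum of those bytes, fold, and return the value byte-swapped for
 * the parsing BD.
 */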
1786static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1787{
1788 if (fix > 0)
1789 csum = (u16) ~csum_fold(csum_sub(csum,
1790 csum_partial(t_header - fix, fix, 0)));
1791
1792 else if (fix < 0)
1793 csum = (u16) ~csum_fold(csum_add(csum,
1794 csum_partial(t_header, -fix, 0)));
1795
1796 return swab16(csum);
1797}
1798
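/* Classify an outgoing skb: returns a bitmask of XMIT_CSUM_V4/V6,
 * XMIT_CSUM_TCP and XMIT_GSO_V4/V6 flags (or XMIT_PLAIN when no checksum
 * offload is requested) that drives the rest of the transmit path.
 */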
1799static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1800{
1801 u32 rc;
1802
1803 if (skb->ip_summed != CHECKSUM_PARTIAL)
1804 rc = XMIT_PLAIN;
1805
1806 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00001807 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001808 rc = XMIT_CSUM_V6;
1809 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1810 rc |= XMIT_CSUM_TCP;
1811
1812 } else {
1813 rc = XMIT_CSUM_V4;
1814 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1815 rc |= XMIT_CSUM_TCP;
1816 }
1817 }
1818
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00001819 if (skb_is_gso_v6(skb))
1820 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1821 else if (skb_is_gso(skb))
1822 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001823
1824 return rc;
1825}
1826
1827#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1828/* Check if the packet requires linearization (i.e. it is too fragmented).
1829 * There is no need to check fragmentation if the page size is > 8K, since
1830 * then the FW restrictions cannot be violated. */
1831static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1832 u32 xmit_type)
1833{
1834 int to_copy = 0;
1835 int hlen = 0;
1836 int first_bd_sz = 0;
1837
1838 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1839 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1840
1841 if (xmit_type & XMIT_GSO) {
1842 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1843 /* Check if LSO packet needs to be copied:
1844 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1845 int wnd_size = MAX_FETCH_BD - 3;
1846 /* Number of windows to check */
1847 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1848 int wnd_idx = 0;
1849 int frag_idx = 0;
1850 u32 wnd_sum = 0;
1851
1852 /* Headers length */
1853 hlen = (int)(skb_transport_header(skb) - skb->data) +
1854 tcp_hdrlen(skb);
1855
1856 /* Amount of data (w/o headers) on the linear part of the SKB */
1857 first_bd_sz = skb_headlen(skb) - hlen;
1858
1859 wnd_sum = first_bd_sz;
1860
1861 /* Calculate the first sum - it's special */
1862 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1863 wnd_sum +=
1864 skb_shinfo(skb)->frags[frag_idx].size;
1865
1866 /* If there was data on linear skb data - check it */
1867 if (first_bd_sz > 0) {
1868 if (unlikely(wnd_sum < lso_mss)) {
1869 to_copy = 1;
1870 goto exit_lbl;
1871 }
1872
1873 wnd_sum -= first_bd_sz;
1874 }
1875
1876 /* Others are easier: run through the frag list and
1877 check all windows */
1878 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1879 wnd_sum +=
1880 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1881
1882 if (unlikely(wnd_sum < lso_mss)) {
1883 to_copy = 1;
1884 break;
1885 }
1886 wnd_sum -=
1887 skb_shinfo(skb)->frags[wnd_idx].size;
1888 }
1889 } else {
1890 /* in the non-LSO case, a packet that is too fragmented
1891 must always be linearized */
1892 to_copy = 1;
1893 }
1894 }
1895
1896exit_lbl:
1897 if (unlikely(to_copy))
1898 DP(NETIF_MSG_TX_QUEUED,
1899 "Linearization IS REQUIRED for %s packet. "
1900 "num_frags %d hlen %d first_bd_sz %d\n",
1901 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1902 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1903
1904 return to_copy;
1905}
1906#endif
1907
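/* Fill the LSO-related fields (MSS and the IPv6 extension-header
 * indication) of the E2 parsing data for a GSO packet.
 */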
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001908static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1909 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001910{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001911 *parsing_data |= (skb_shinfo(skb)->gso_size <<
1912 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1913 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001914 if ((xmit_type & XMIT_GSO_V6) &&
1915 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001916 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001917}
1918
1919/**
1920 * Update the E1x parsing BD (PBD) fields needed for a GSO (TSO)
1921 * packet: MSS, TCP sequence number, flags and pseudo checksum.
1922 *
1923 * @param skb
1924 * @param pbd
1925 * @param xmit_type
1926 */
1927static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1928 struct eth_tx_parse_bd_e1x *pbd,
1929 u32 xmit_type)
1930{
1931 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1932 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1933 pbd->tcp_flags = pbd_tcp_flags(skb);
1934
1935 if (xmit_type & XMIT_GSO_V4) {
1936 pbd->ip_id = swab16(ip_hdr(skb)->id);
1937 pbd->tcp_pseudo_csum =
1938 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1939 ip_hdr(skb)->daddr,
1940 0, IPPROTO_TCP, 0));
1941
1942 } else
1943 pbd->tcp_pseudo_csum =
1944 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1945 &ipv6_hdr(skb)->daddr,
1946 0, IPPROTO_TCP, 0));
1947
1948 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1949}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001950
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001951/**
1952 * Fill the checksum offload fields of the E2 parsing data.
1953 *
1954 * @param bp
1955 * @param skb
1956 * @param parsing_data
1957 * @param xmit_type
1958 * @return header length in bytes
1959 */
1960static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001961 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001962{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001963 *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1964 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1965 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001966
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001967 *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1968 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1969 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001970
1971 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1972}
1973
1974/**
1975 * Fill the checksum offload fields of the E1x parsing BD (PBD).
1976 *
1977 * @param bp
1978 * @param skb
1979 * @param pbd
1980 * @param xmit_type
1981 * @return header length in bytes
1982 */
1983static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1984 struct eth_tx_parse_bd_e1x *pbd,
1985 u32 xmit_type)
1986{
1987 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1988
1989 /* for now NS flag is not used in Linux */
1990 pbd->global_data =
1991 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1992 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1993
1994 pbd->ip_hlen_w = (skb_transport_header(skb) -
1995 skb_network_header(skb)) / 2;
1996
1997 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1998
1999 pbd->total_hlen_w = cpu_to_le16(hlen);
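	/* hlen was accumulated in 16-bit words; convert it back to bytes */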
2000 hlen = hlen*2;
2001
2002 if (xmit_type & XMIT_CSUM_TCP) {
2003 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2004
2005 } else {
2006 s8 fix = SKB_CS_OFF(skb); /* signed! */
2007
2008 DP(NETIF_MSG_TX_QUEUED,
2009 "hlen %d fix %d csum before fix %x\n",
2010 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2011
2012 /* HW bug: fixup the CSUM */
2013 pbd->tcp_pseudo_csum =
2014 bnx2x_csum_fix(skb_transport_header(skb),
2015 SKB_CS(skb), fix);
2016
2017 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2018 pbd->tcp_pseudo_csum);
2019 }
2020
2021 return hlen;
2022}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002023
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002024/* called with netif_tx_lock
2025 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2026 * netif_wake_queue()
2027 */
2028netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2029{
2030 struct bnx2x *bp = netdev_priv(dev);
2031 struct bnx2x_fastpath *fp;
2032 struct netdev_queue *txq;
2033 struct sw_tx_bd *tx_buf;
2034 struct eth_tx_start_bd *tx_start_bd;
2035 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002036 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002037 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002038 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002039 u16 pkt_prod, bd_prod;
2040 int nbd, fp_index;
2041 dma_addr_t mapping;
2042 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2043 int i;
2044 u8 hlen = 0;
2045 __le16 pkt_size = 0;
2046 struct ethhdr *eth;
2047 u8 mac_type = UNICAST_ADDRESS;
2048
2049#ifdef BNX2X_STOP_ON_ERROR
2050 if (unlikely(bp->panic))
2051 return NETDEV_TX_BUSY;
2052#endif
2053
2054 fp_index = skb_get_queue_mapping(skb);
2055 txq = netdev_get_tx_queue(dev, fp_index);
2056
2057 fp = &bp->fp[fp_index];
2058
2059 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2060 fp->eth_q_stats.driver_xoff++;
2061 netif_tx_stop_queue(txq);
2062 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2063 return NETDEV_TX_BUSY;
2064 }
2065
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002066 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2067 "protocol(%x,%x) gso type %x xmit_type %x\n",
2068 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002069 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2070
2071 eth = (struct ethhdr *)skb->data;
2072
2073 /* set flag according to packet type (UNICAST_ADDRESS is the default) */
2074 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2075 if (is_broadcast_ether_addr(eth->h_dest))
2076 mac_type = BROADCAST_ADDRESS;
2077 else
2078 mac_type = MULTICAST_ADDRESS;
2079 }
2080
2081#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2082 /* First, check if we need to linearize the skb (due to FW
2083 * restrictions). There is no need to check fragmentation if the page
2084 * size is > 8K, since then the FW restrictions cannot be violated. */
2085 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2086 /* Statistics of linearization */
2087 bp->lin_cnt++;
2088 if (skb_linearize(skb) != 0) {
2089 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2090 "silently dropping this SKB\n");
2091 dev_kfree_skb_any(skb);
2092 return NETDEV_TX_OK;
2093 }
2094 }
2095#endif
2096
2097 /*
2098 Please read carefully. First we use one BD which we mark as start,
2099 then we have a parsing info BD (used for TSO or xsum),
2100 and only then we have the rest of the TSO BDs.
2101 (don't forget to mark the last one as last,
2102 and to unmap only AFTER you write to the BD ...)
2103 And above all, all PBD sizes are in words - NOT DWORDS!
2104 */
2105
2106 pkt_prod = fp->tx_pkt_prod++;
2107 bd_prod = TX_BD(fp->tx_bd_prod);
2108
2109 /* get a tx_buf and first BD */
2110 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2111 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2112
2113 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002114 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2115 mac_type);
2116
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002117 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002118 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002119
2120 /* remember the first BD of the packet */
2121 tx_buf->first_bd = fp->tx_bd_prod;
2122 tx_buf->skb = skb;
2123 tx_buf->flags = 0;
2124
2125 DP(NETIF_MSG_TX_QUEUED,
2126 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2127 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2128
Jesse Grosseab6d182010-10-20 13:56:03 +00002129 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002130 tx_start_bd->vlan_or_ethertype =
2131 cpu_to_le16(vlan_tx_tag_get(skb));
2132 tx_start_bd->bd_flags.as_bitfield |=
2133 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002134 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002135 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002136
2137 /* turn on parsing and get a BD */
2138 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002139
2140 if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002141 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2142
2143 if (xmit_type & XMIT_CSUM_V4)
2144 tx_start_bd->bd_flags.as_bitfield |=
2145 ETH_TX_BD_FLAGS_IP_CSUM;
2146 else
2147 tx_start_bd->bd_flags.as_bitfield |=
2148 ETH_TX_BD_FLAGS_IPV6;
2149
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002150 if (!(xmit_type & XMIT_CSUM_TCP))
2151 tx_start_bd->bd_flags.as_bitfield |=
2152 ETH_TX_BD_FLAGS_IS_UDP;
2153 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002154
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002155 if (CHIP_IS_E2(bp)) {
2156 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2157 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2158 /* Set PBD in checksum offload case */
2159 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002160 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2161 &pbd_e2_parsing_data,
2162 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002163 } else {
2164 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2165 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2166 /* Set PBD in checksum offload case */
2167 if (xmit_type & XMIT_CSUM)
2168 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002169
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002170 }
2171
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002172 /* Map skb linear data for DMA */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002173 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2174 skb_headlen(skb), DMA_TO_DEVICE);
2175
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002176 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002177 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2178 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2179 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2180 tx_start_bd->nbd = cpu_to_le16(nbd);
2181 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2182 pkt_size = tx_start_bd->nbytes;
2183
2184 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2185 " nbytes %d flags %x vlan %x\n",
2186 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2187 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002188 tx_start_bd->bd_flags.as_bitfield,
2189 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002190
2191 if (xmit_type & XMIT_GSO) {
2192
2193 DP(NETIF_MSG_TX_QUEUED,
2194 "TSO packet len %d hlen %d total len %d tso size %d\n",
2195 skb->len, hlen, skb_headlen(skb),
2196 skb_shinfo(skb)->gso_size);
2197
2198 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2199
2200 if (unlikely(skb_headlen(skb) > hlen))
2201 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2202 hlen, bd_prod, ++nbd);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002203 if (CHIP_IS_E2(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002204 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2205 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002206 else
2207 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002208 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002209
2210 /* Set the PBD's parsing_data field if not zero
2211 * (for the chips newer than 57711).
2212 */
2213 if (pbd_e2_parsing_data)
2214 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2215
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002216 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2217
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002218 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002219 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2220 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2221
2222 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2223 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2224 if (total_pkt_bd == NULL)
2225 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2226
2227 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2228 frag->page_offset,
2229 frag->size, DMA_TO_DEVICE);
2230
2231 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2232 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2233 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2234 le16_add_cpu(&pkt_size, frag->size);
2235
2236 DP(NETIF_MSG_TX_QUEUED,
2237 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2238 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2239 le16_to_cpu(tx_data_bd->nbytes));
2240 }
2241
2242 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2243
2244 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2245
2246 /* now send a tx doorbell, counting the next BD
2247 * if the packet contains or ends with it
2248 */
2249 if (TX_BD_POFF(bd_prod) < nbd)
2250 nbd++;
2251
2252 if (total_pkt_bd != NULL)
2253 total_pkt_bd->total_pkt_bytes = pkt_size;
2254
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002255 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002256 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002257 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002258 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002259 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2260 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2261 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2262 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002263 if (pbd_e2)
2264 DP(NETIF_MSG_TX_QUEUED,
2265 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2266 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2267 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2268 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2269 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002270 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2271
2272 /*
2273 * Make sure that the BD data is updated before updating the producer
2274 * since FW might read the BD right after the producer is updated.
2275 * This is only applicable for weak-ordered memory model archs such
2276 * as IA-64. The following barrier is also mandatory since the FW
2277 * assumes packets must have BDs.
2278 */
2279 wmb();
2280
2281 fp->tx_db.data.prod += nbd;
2282 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002283
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002284 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002285
2286 mmiowb();
2287
2288 fp->tx_bd_prod += nbd;
2289
2290 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2291 netif_tx_stop_queue(txq);
2292
2293 /* the paired memory barrier is in bnx2x_tx_int(); we have to keep
2294 * the ordering of the set_bit() in netif_tx_stop_queue() and the read
2295 * of fp->tx_bd_cons */
2296 smp_mb();
2297
2298 fp->eth_q_stats.driver_xoff++;
2299 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2300 netif_tx_wake_queue(txq);
2301 }
2302 fp->tx_pkt++;
2303
2304 return NETDEV_TX_OK;
2305}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002306
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002307/* called with rtnl_lock */
2308int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2309{
2310 struct sockaddr *addr = p;
2311 struct bnx2x *bp = netdev_priv(dev);
2312
2313 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2314 return -EINVAL;
2315
2316 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002317 if (netif_running(dev))
2318 bnx2x_set_eth_mac(bp, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002319
2320 return 0;
2321}
2322
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002323
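/* Request the device interrupts: MSI-X vectors when enabled, otherwise a
 * single MSI or legacy INTx line.
 */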
stephen hemminger8d962862010-10-21 07:50:56 +00002324static int bnx2x_setup_irqs(struct bnx2x *bp)
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002325{
2326 int rc = 0;
2327 if (bp->flags & USING_MSIX_FLAG) {
2328 rc = bnx2x_req_msix_irqs(bp);
2329 if (rc)
2330 return rc;
2331 } else {
2332 bnx2x_ack_int(bp);
2333 rc = bnx2x_req_irq(bp);
2334 if (rc) {
2335 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2336 return rc;
2337 }
2338 if (bp->flags & USING_MSI_FLAG) {
2339 bp->dev->irq = bp->pdev->irq;
2340 netdev_info(bp->dev, "using MSI IRQ %d\n",
2341 bp->pdev->irq);
2342 }
2343 }
2344
2345 return 0;
2346}
2347
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002348void bnx2x_free_mem_bp(struct bnx2x *bp)
2349{
2350 kfree(bp->fp);
2351 kfree(bp->msix_table);
2352 kfree(bp->ilt);
2353}
2354
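/* Allocate the per-device bookkeeping structures: the fastpath array,
 * the MSI-X entry table and the ILT descriptor.
 */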
2355int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2356{
2357 struct bnx2x_fastpath *fp;
2358 struct msix_entry *tbl;
2359 struct bnx2x_ilt *ilt;
2360
2361 /* fp array */
2362 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2363 if (!fp)
2364 goto alloc_err;
2365 bp->fp = fp;
2366
2367 /* msix table */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002368 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002369 GFP_KERNEL);
2370 if (!tbl)
2371 goto alloc_err;
2372 bp->msix_table = tbl;
2373
2374 /* ilt */
2375 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2376 if (!ilt)
2377 goto alloc_err;
2378 bp->ilt = ilt;
2379
2380 return 0;
2381alloc_err:
2382 bnx2x_free_mem_bp(bp);
2383 return -ENOMEM;
2384
2385}
2386
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002387/* called with rtnl_lock */
2388int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2389{
2390 struct bnx2x *bp = netdev_priv(dev);
2391 int rc = 0;
2392
2393 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2394 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2395 return -EAGAIN;
2396 }
2397
2398 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2399 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2400 return -EINVAL;
2401
2402 /* This does not race with packet allocation
2403 * because the actual alloc size is
2404 * only updated as part of load
2405 */
2406 dev->mtu = new_mtu;
2407
2408 if (netif_running(dev)) {
2409 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2410 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2411 }
2412
2413 return rc;
2414}
2415
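/* Tx timeout handler (ndo_tx_timeout): schedule the reset task so the
 * netif can be shut down gracefully before the chip is reset.
 */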
2416void bnx2x_tx_timeout(struct net_device *dev)
2417{
2418 struct bnx2x *bp = netdev_priv(dev);
2419
2420#ifdef BNX2X_STOP_ON_ERROR
2421 if (!bp->panic)
2422 bnx2x_panic();
2423#endif
2424 /* This allows the netif to be shut down gracefully before resetting */
2425 schedule_delayed_work(&bp->reset_task, 0);
2426}
2427
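/* PCI suspend entry point: detach the net device, unload the NIC and move
 * the chip to the requested low-power state.
 */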
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002428int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2429{
2430 struct net_device *dev = pci_get_drvdata(pdev);
2431 struct bnx2x *bp;
2432
2433 if (!dev) {
2434 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2435 return -ENODEV;
2436 }
2437 bp = netdev_priv(dev);
2438
2439 rtnl_lock();
2440
2441 pci_save_state(pdev);
2442
2443 if (!netif_running(dev)) {
2444 rtnl_unlock();
2445 return 0;
2446 }
2447
2448 netif_device_detach(dev);
2449
2450 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2451
2452 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2453
2454 rtnl_unlock();
2455
2456 return 0;
2457}
2458
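/* PCI resume entry point: restore PCI state, bring the chip back to D0 and
 * reload the NIC if the interface was running.
 */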
2459int bnx2x_resume(struct pci_dev *pdev)
2460{
2461 struct net_device *dev = pci_get_drvdata(pdev);
2462 struct bnx2x *bp;
2463 int rc;
2464
2465 if (!dev) {
2466 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2467 return -ENODEV;
2468 }
2469 bp = netdev_priv(dev);
2470
2471 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2472 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2473 return -EAGAIN;
2474 }
2475
2476 rtnl_lock();
2477
2478 pci_restore_state(pdev);
2479
2480 if (!netif_running(dev)) {
2481 rtnl_unlock();
2482 return 0;
2483 }
2484
2485 bnx2x_set_power_state(bp, PCI_D0);
2486 netif_device_attach(dev);
2487
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002488 /* Since the chip was reset, clear the FW sequence number */
2489 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002490 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2491
2492 rtnl_unlock();
2493
2494 return rc;
2495}