/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"

#include "bnx2x_init.h"

static int bnx2x_setup_irqs(struct bnx2x *bp);

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

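/* Process TX completions for this fastpath ring: walk the software
 * consumer up to the HW-reported completion index, unmap and free the
 * transmitted skbs, and re-wake the netdev TX queue if it was stopped
 * and enough BDs are available again.
 */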
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
				      " pkt_cons %u\n",
		   fp->index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

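/* Reflect the SGEs consumed by an aggregated (TPA) packet in the SGE
 * mask and advance the SGE producer so the pages can be reused by HW.
 */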
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

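/* Start a TPA aggregation on the given queue (bin): park the partially
 * received skb in the TPA pool and hand a fresh mapped skb to the
 * producer BD so the ring can keep receiving while HW aggregates.
 */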
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

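/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replacing each consumed page in the SGE ring.  Returns 0
 * on success or the allocation error if a substitute page cannot be
 * obtained, in which case the caller drops the packet.
 */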
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx =
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

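/* Complete a TPA aggregation: unmap the pooled skb, fix up the IP
 * checksum, glue on the SGE page fragments and pass the resulting
 * packet to the stack, then refill the pool entry with a fresh skb.
 */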
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((le16_to_cpu(cqe->fast_path_cqe.
			    pars_flags.flags) & PARSING_FLAGS_VLAN))
				__vlan_hwaccel_put_tag(skb,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag));
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}

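/* Main RX fast-path handler, called from NAPI poll.  Walks the
 * completion queue for up to 'budget' packets: dispatches slow-path
 * CQEs, drives TPA start/stop, copies or remaps regular packets and
 * hands them to the stack, then updates the BD/CQE/SGE producers.
 */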
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* - If CQE is marked both TPA_START and TPA_END it is
			 *   a non-TPA CQE.
			 * - FP CQE will always have either TPA_START or/and
			 *   TPA_STOP flags set.
			 */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;
				} else { /* TPA_STOP */
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

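/* Per-queue MSI-X interrupt handler: acknowledge the status block with
 * interrupts disabled and schedule NAPI for this fastpath.
 */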
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
						FUNC_MF_CFG_MAX_BW_MASK) >>
						FUNC_MF_CFG_MAX_BW_SHIFT;
		/* Calculate the current MAX line speed limit for the DCC
		 * capable devices
		 */
		if (IS_MF_SD(bp)) {
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		} else /* IS_MF_SI(bp)) */
			line_speed = (line_speed * maxCfg) / 100;
	}

	return line_speed;
}

void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bnx2x_get_mf_speed(bp);

		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			BNX2X_ERR("was only able to allocate "
				  "%d rx skbs on queue[%d]\n", i, fp->index);
			fp->eth_q_stats.rx_skb_alloc_failed++;
			break;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= i);
	}

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i;
}

static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
					      MAX_RX_AVAIL/bp->num_queues;

	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);

	bnx2x_alloc_rx_bds(fp, rx_ring_size);

	/* Warning!
	 * this will generate an interrupt (to the TSTORM)
	 * must only be done after chip is initialized
	 */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
		IP_HEADER_ALIGNMENT_PADDING;

	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		if (!fp->disable_tpa) {
			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for"
						  " queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp,
								fp, ring_prod);
					bnx2x_free_tpa_pool(bp,
							    fp, max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* Allocate BDs and initialize BD ring */
		bnx2x_alloc_rx_bd_ring(fp);

		if (j != 0)
			continue;

		if (!CHIP_IS_E2(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}

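/* Request MSI-X vectors: one for the slow path, an optional CNIC
 * vector and one per Ethernet queue.  If fewer vectors are granted,
 * bp->num_queues is trimmed to match; any remaining failure is
 * reported to the caller.
 */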
int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable  rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
		   bp->num_queues);
	} else if (rc) {
		/* fall to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		offset++;
		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_CONTEXT_USE;
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

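/* ndo_select_queue callback: steer FCoE/FIP frames to the dedicated
 * FCoE L2 ring (when CNIC is compiled in and FCoE is enabled) and hash
 * all other traffic across the regular Ethernet TX queues.
 */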
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef BCM_CNIC
	struct bnx2x *bp = netdev_priv(dev);
	if (NO_FCOE(bp))
		return skb_tx_hash(dev, skb);
	else {
		struct ethhdr *hdr = (struct ethhdr *)skb->data;
		u16 ether_type = ntohs(hdr->h_proto);

		/* Skip VLAN tag if present */
		if (ether_type == ETH_P_8021Q) {
			struct vlan_ethhdr *vhdr =
				(struct vlan_ethhdr *)skb->data;

			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
		}

		/* If ethertype is FCoE or FIP - use FCoE ring */
		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
			return bnx2x_fcoe(bp, index);
	}
#endif
	/* Select a non-FCoE queue:  if FCoE is enabled, exclude FCoE L2 ring
	 */
	return __skb_tx_hash(dev, skb,
			dev->real_num_tx_queues - FCOE_CONTEXT_USE);
}

void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* Add special queues */
	bp->num_queues += NONE_ETH_CONTEXT_USE;
}

#ifdef BCM_CNIC
static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
{
	if (!NO_FCOE(bp)) {
		if (!IS_MF_SD(bp))
			bnx2x_set_fip_eth_mac_addr(bp, 1);
		bnx2x_set_all_enode_macs(bp, 1);
		bp->flags |= FCOE_MACS_SET;
	}
}
#endif

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
}

static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, num = bp->num_queues;

#ifdef BCM_CNIC
	if (NO_FCOE(bp))
		num -= FCOE_CONTEXT_USE;

#endif
	netif_set_real_num_tx_queues(bp->dev, num);
	rc = netif_set_real_num_rx_queues(bp->dev, num);
	return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		return rc;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		goto load_error0;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on FCoE L2 ring */
	bnx2x_fcoe(bp, disable_tpa) = 1;
#endif
	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error1;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error1;
		}

	} else {
		int path = BP_PATH(bp);
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (!CHIP_IS_E1(bp) &&
	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
		if (rc)
#ifdef BCM_CNIC
			goto load_error4;
#else
			goto load_error3;
#endif
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

#ifdef BCM_CNIC
	bnx2x_set_fcoe_eth_macs(bp);
#endif

	bnx2x_set_eth_mac(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);

	bnx2x_release_firmware(bp);

	return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Stop Tx */
	bnx2x_tx_disable(bp);

	del_timer_sync(&bp->timer);

	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Disable HW interrupts, NAPI and Tx */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);
	}

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mail box sequence if there is on going recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */
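/* NAPI poll handler: service TX completions and up to 'budget' RX
 * packets for this fastpath, and re-arm the status block interrupt
 * only once no further RX/TX work is pending.
 */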
int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
#ifdef BCM_CNIC
			/* No need to update SB for FCoE L2 ring as long as
			 * it's connected to the default SB and the SB
			 * has been updated when NAPI was scheduled.
			 */
			if (IS_FCOE_FP(fp)) {
				napi_complete(napi);
				break;
			}
#endif

			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block,
			 * thus we need to ensure that status block indices
			 * have been actually read (bnx2x_update_fpsb_idx)
			 * prior to this check (bnx2x_has_rx_work) so that
			 * we won't write the "newer" value of the status block
			 * to IGU (if there was a DMA right after
			 * bnx2x_has_rx_work and if there is no rmb, the memory
			 * reading (bnx2x_update_fpsb_idx) may be postponed
			 * to right before bnx2x_ack_sb). In this case there
			 * will never be another interrupt until there is
			 * another update of the status block, while there
			 * is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				DP(NETIF_MSG_HW,
				   "Update index to %d\n", fp->fp_hc_idx);
				bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_hc_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}

Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001702/* we split the first BD into headers and data BDs
1703 * to ease the pain of our fellow microcode engineers.
1704 * We use one mapping for both BDs.
1705 * So far this has only been observed to happen
1706 * in Other Operating Systems(TM)
1707 */
1708static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1709 struct bnx2x_fastpath *fp,
1710 struct sw_tx_bd *tx_buf,
1711 struct eth_tx_start_bd **tx_bd, u16 hlen,
1712 u16 bd_prod, int nbd)
1713{
1714 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1715 struct eth_tx_bd *d_tx_bd;
1716 dma_addr_t mapping;
1717 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1718
1719 /* first fix first BD */
1720 h_tx_bd->nbd = cpu_to_le16(nbd);
1721 h_tx_bd->nbytes = cpu_to_le16(hlen);
1722
1723 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1724 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1725 h_tx_bd->addr_lo, h_tx_bd->nbd);
1726
1727 /* now get a new data BD
1728 * (after the pbd) and fill it */
1729 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1730 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1731
1732 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1733 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1734
1735 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1736 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1737 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1738
1739 /* this marks the BD as one that has no individual mapping */
1740 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1741
1742 DP(NETIF_MSG_TX_QUEUED,
1743 "TSO split data size is %d (%x:%x)\n",
1744 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1745
1746 /* update tx_bd */
1747 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1748
1749 return bd_prod;
1750}
1751
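/* Fix up a checksum when the checksum start used by the stack is 'fix' bytes
 * away from the transport header: add or subtract the checksum of the skipped
 * bytes, then return the folded result byte-swapped for the parsing BD.
 */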
1752static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1753{
1754 if (fix > 0)
1755 csum = (u16) ~csum_fold(csum_sub(csum,
1756 csum_partial(t_header - fix, fix, 0)));
1757
1758 else if (fix < 0)
1759 csum = (u16) ~csum_fold(csum_add(csum,
1760 csum_partial(t_header, -fix, 0)));
1761
1762 return swab16(csum);
1763}
1764
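/* Classify an skb for transmission: XMIT_PLAIN when no checksum offload is
 * requested, otherwise XMIT_CSUM_V4/V6 (plus XMIT_CSUM_TCP for TCP) and
 * XMIT_GSO_V4/V6 for GSO packets.
 */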
1765static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1766{
1767 u32 rc;
1768
1769 if (skb->ip_summed != CHECKSUM_PARTIAL)
1770 rc = XMIT_PLAIN;
1771
1772 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00001773 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001774 rc = XMIT_CSUM_V6;
1775 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1776 rc |= XMIT_CSUM_TCP;
1777
1778 } else {
1779 rc = XMIT_CSUM_V4;
1780 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1781 rc |= XMIT_CSUM_TCP;
1782 }
1783 }
1784
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00001785 if (skb_is_gso_v6(skb))
1786 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1787 else if (skb_is_gso(skb))
1788 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001789
1790 return rc;
1791}
1792
1793#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1794/* check whether the packet requires linearization (i.e. it is too
1795   fragmented); no need to check fragmentation if the page size > 8K
1796   (the FW restrictions cannot be violated in that case) */
1797static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1798 u32 xmit_type)
1799{
1800 int to_copy = 0;
1801 int hlen = 0;
1802 int first_bd_sz = 0;
1803
1804 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1805 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1806
1807 if (xmit_type & XMIT_GSO) {
1808 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1809 /* Check if LSO packet needs to be copied:
1810 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1811 int wnd_size = MAX_FETCH_BD - 3;
1812 /* Number of windows to check */
1813 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1814 int wnd_idx = 0;
1815 int frag_idx = 0;
1816 u32 wnd_sum = 0;
1817
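			/* Slide a window of wnd_size fragments across the
			 * skb: if any such window carries fewer bytes than
			 * one MSS, a single segment would need more BDs than
			 * the FW can fetch, so the skb has to be linearized.
			 */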
1818 /* Headers length */
1819 hlen = (int)(skb_transport_header(skb) - skb->data) +
1820 tcp_hdrlen(skb);
1821
1822 /* Amount of data (w/o headers) on linear part of SKB*/
1823 first_bd_sz = skb_headlen(skb) - hlen;
1824
1825 wnd_sum = first_bd_sz;
1826
1827 /* Calculate the first sum - it's special */
1828 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1829 wnd_sum +=
1830 skb_shinfo(skb)->frags[frag_idx].size;
1831
1832 /* If there was data on linear skb data - check it */
1833 if (first_bd_sz > 0) {
1834 if (unlikely(wnd_sum < lso_mss)) {
1835 to_copy = 1;
1836 goto exit_lbl;
1837 }
1838
1839 wnd_sum -= first_bd_sz;
1840 }
1841
1842 /* Others are easier: run through the frag list and
1843 check all windows */
1844 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1845 wnd_sum +=
1846 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1847
1848 if (unlikely(wnd_sum < lso_mss)) {
1849 to_copy = 1;
1850 break;
1851 }
1852 wnd_sum -=
1853 skb_shinfo(skb)->frags[wnd_idx].size;
1854 }
1855 } else {
1856			/* a non-LSO packet that is too fragmented must
1857			   always be linearized */
1858 to_copy = 1;
1859 }
1860 }
1861
1862exit_lbl:
1863 if (unlikely(to_copy))
1864 DP(NETIF_MSG_TX_QUEUED,
1865 "Linearization IS REQUIRED for %s packet. "
1866 "num_frags %d hlen %d first_bd_sz %d\n",
1867 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1868 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1869
1870 return to_copy;
1871}
1872#endif
1873
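/* Fill the E2 parsing data with the LSO MSS and flag IPv6 packets that carry
 * extension headers.
 */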
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001874static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1875 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001876{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001877 *parsing_data |= (skb_shinfo(skb)->gso_size <<
1878 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1879 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001880 if ((xmit_type & XMIT_GSO_V6) &&
1881 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001882 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001883}
1884
1885/**
1886 * Update PBD in GSO case.
1887 *
1888 * @param skb
1890 * @param pbd
1891 * @param xmit_type
1892 */
1893static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1894 struct eth_tx_parse_bd_e1x *pbd,
1895 u32 xmit_type)
1896{
1897 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1898 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1899 pbd->tcp_flags = pbd_tcp_flags(skb);
1900
1901 if (xmit_type & XMIT_GSO_V4) {
1902 pbd->ip_id = swab16(ip_hdr(skb)->id);
1903 pbd->tcp_pseudo_csum =
1904 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1905 ip_hdr(skb)->daddr,
1906 0, IPPROTO_TCP, 0));
1907
1908 } else
1909 pbd->tcp_pseudo_csum =
1910 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1911 &ipv6_hdr(skb)->daddr,
1912 0, IPPROTO_TCP, 0));
1913
1914 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1915}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001916
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001917/**
1918 * Update the E2 parsing data in the checksum offload case.
1919 *
1920 * @param bp
1921 * @param skb
1922 * @param parsing_data
1923 * @param xmit_type
1923 *
1924 * @return header len
1925 */
1926static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001927 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001928{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001929 *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1930 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1931 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001932
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001933 *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1934 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1935 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001936
1937 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1938}
1939
1940/**
1941 * Update the E1x parsing BD in the checksum offload case.
1942 *
1943 * @param bp
1944 * @param skb
1945 * @param pbd
1946 * @param xmit_type
1946 *
1947 * @return Header length
1948 */
1949static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1950 struct eth_tx_parse_bd_e1x *pbd,
1951 u32 xmit_type)
1952{
1953 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1954
1955 /* for now NS flag is not used in Linux */
1956 pbd->global_data =
1957 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1958 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1959
1960 pbd->ip_hlen_w = (skb_transport_header(skb) -
1961 skb_network_header(skb)) / 2;
1962
1963 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1964
1965 pbd->total_hlen_w = cpu_to_le16(hlen);
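	/* total_hlen_w is kept in 16-bit words; convert hlen back to bytes
	 * for the value returned to the caller.
	 */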
1966 hlen = hlen*2;
1967
1968 if (xmit_type & XMIT_CSUM_TCP) {
1969 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1970
1971 } else {
1972 s8 fix = SKB_CS_OFF(skb); /* signed! */
1973
1974 DP(NETIF_MSG_TX_QUEUED,
1975 "hlen %d fix %d csum before fix %x\n",
1976 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1977
1978 /* HW bug: fixup the CSUM */
1979 pbd->tcp_pseudo_csum =
1980 bnx2x_csum_fix(skb_transport_header(skb),
1981 SKB_CS(skb), fix);
1982
1983 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1984 pbd->tcp_pseudo_csum);
1985 }
1986
1987 return hlen;
1988}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001989
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001990/* called with netif_tx_lock
1991 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1992 * netif_wake_queue()
1993 */
1994netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1995{
1996 struct bnx2x *bp = netdev_priv(dev);
1997 struct bnx2x_fastpath *fp;
1998 struct netdev_queue *txq;
1999 struct sw_tx_bd *tx_buf;
2000 struct eth_tx_start_bd *tx_start_bd;
2001 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002002 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002003 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002004 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002005 u16 pkt_prod, bd_prod;
2006 int nbd, fp_index;
2007 dma_addr_t mapping;
2008 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2009 int i;
2010 u8 hlen = 0;
2011 __le16 pkt_size = 0;
2012 struct ethhdr *eth;
2013 u8 mac_type = UNICAST_ADDRESS;
2014
2015#ifdef BNX2X_STOP_ON_ERROR
2016 if (unlikely(bp->panic))
2017 return NETDEV_TX_BUSY;
2018#endif
2019
2020 fp_index = skb_get_queue_mapping(skb);
2021 txq = netdev_get_tx_queue(dev, fp_index);
2022
2023 fp = &bp->fp[fp_index];
2024
2025 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2026 fp->eth_q_stats.driver_xoff++;
2027 netif_tx_stop_queue(txq);
2028 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2029 return NETDEV_TX_BUSY;
2030 }
2031
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002032 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2033 "protocol(%x,%x) gso type %x xmit_type %x\n",
2034 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002035 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2036
2037 eth = (struct ethhdr *)skb->data;
2038
2039 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2040 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2041 if (is_broadcast_ether_addr(eth->h_dest))
2042 mac_type = BROADCAST_ADDRESS;
2043 else
2044 mac_type = MULTICAST_ADDRESS;
2045 }
2046
2047#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2048 /* First, check if we need to linearize the skb (due to FW
2049 restrictions). No need to check fragmentation if page size > 8K
2050 (there will be no violation to FW restrictions) */
2051 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2052 /* Statistics of linearization */
2053 bp->lin_cnt++;
2054 if (skb_linearize(skb) != 0) {
2055 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2056 "silently dropping this SKB\n");
2057 dev_kfree_skb_any(skb);
2058 return NETDEV_TX_OK;
2059 }
2060 }
2061#endif
2062
2063 /*
2064 Please read carefully. First we use one BD which we mark as start,
2065 then we have a parsing info BD (used for TSO or xsum),
2066 and only then we have the rest of the TSO BDs.
2067 (don't forget to mark the last one as last,
2068 and to unmap only AFTER you write to the BD ...)
2069 And above all, all pbd sizes are in words - NOT DWORDS!
2070 */
2071
2072 pkt_prod = fp->tx_pkt_prod++;
2073 bd_prod = TX_BD(fp->tx_bd_prod);
2074
2075 /* get a tx_buf and first BD */
2076 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2077 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2078
2079 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002080 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2081 mac_type);
2082
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002083 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002084 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002085
2086 /* remember the first BD of the packet */
2087 tx_buf->first_bd = fp->tx_bd_prod;
2088 tx_buf->skb = skb;
2089 tx_buf->flags = 0;
2090
2091 DP(NETIF_MSG_TX_QUEUED,
2092 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2093 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2094
Jesse Grosseab6d182010-10-20 13:56:03 +00002095 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002096 tx_start_bd->vlan_or_ethertype =
2097 cpu_to_le16(vlan_tx_tag_get(skb));
2098 tx_start_bd->bd_flags.as_bitfield |=
2099 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002100 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002101 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002102
2103 /* turn on parsing and get a BD */
2104 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002105
2106 if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002107 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2108
2109 if (xmit_type & XMIT_CSUM_V4)
2110 tx_start_bd->bd_flags.as_bitfield |=
2111 ETH_TX_BD_FLAGS_IP_CSUM;
2112 else
2113 tx_start_bd->bd_flags.as_bitfield |=
2114 ETH_TX_BD_FLAGS_IPV6;
2115
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002116 if (!(xmit_type & XMIT_CSUM_TCP))
2117 tx_start_bd->bd_flags.as_bitfield |=
2118 ETH_TX_BD_FLAGS_IS_UDP;
2119 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002120
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002121 if (CHIP_IS_E2(bp)) {
2122 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2123 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2124 /* Set PBD in checksum offload case */
2125 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002126 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2127 &pbd_e2_parsing_data,
2128 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002129 } else {
2130 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2131 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2132 /* Set PBD in checksum offload case */
2133 if (xmit_type & XMIT_CSUM)
2134 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002135
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002136 }
2137
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002138 /* Map skb linear data for DMA */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002139 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2140 skb_headlen(skb), DMA_TO_DEVICE);
2141
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002142 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002143 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2144 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2145 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2146 tx_start_bd->nbd = cpu_to_le16(nbd);
2147 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2148 pkt_size = tx_start_bd->nbytes;
2149
2150 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2151 " nbytes %d flags %x vlan %x\n",
2152 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2153 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002154 tx_start_bd->bd_flags.as_bitfield,
2155 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002156
2157 if (xmit_type & XMIT_GSO) {
2158
2159 DP(NETIF_MSG_TX_QUEUED,
2160 "TSO packet len %d hlen %d total len %d tso size %d\n",
2161 skb->len, hlen, skb_headlen(skb),
2162 skb_shinfo(skb)->gso_size);
2163
2164 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2165
2166 if (unlikely(skb_headlen(skb) > hlen))
2167 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2168 hlen, bd_prod, ++nbd);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002169 if (CHIP_IS_E2(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002170 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2171 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002172 else
2173 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002174 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002175
2176 /* Set the PBD's parsing_data field if not zero
2177 * (for the chips newer than 57711).
2178 */
2179 if (pbd_e2_parsing_data)
2180 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2181
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002182 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2183
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002184 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002185 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2186 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2187
2188 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2189 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2190 if (total_pkt_bd == NULL)
2191 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2192
2193 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2194 frag->page_offset,
2195 frag->size, DMA_TO_DEVICE);
2196
2197 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2198 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2199 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2200 le16_add_cpu(&pkt_size, frag->size);
2201
2202 DP(NETIF_MSG_TX_QUEUED,
2203 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2204 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2205 le16_to_cpu(tx_data_bd->nbytes));
2206 }
2207
2208 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2209
2210 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2211
2212 /* now send a tx doorbell, counting the next BD
2213 * if the packet contains or ends with it
2214 */
2215 if (TX_BD_POFF(bd_prod) < nbd)
2216 nbd++;
2217
2218 if (total_pkt_bd != NULL)
2219 total_pkt_bd->total_pkt_bytes = pkt_size;
2220
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002221 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002222 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002223 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002224 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002225 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2226 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2227 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2228 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002229 if (pbd_e2)
2230 DP(NETIF_MSG_TX_QUEUED,
2231 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2232 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2233 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2234 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2235 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002236 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2237
2238 /*
2239 * Make sure that the BD data is updated before updating the producer
2240 * since FW might read the BD right after the producer is updated.
2241 * This is only applicable for weak-ordered memory model archs such
2242	 * as IA-64. The following barrier is also mandatory since the FW
2243	 * assumes packets must have BDs.
2244 */
2245 wmb();
2246
2247 fp->tx_db.data.prod += nbd;
2248 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002249
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002250 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002251
2252 mmiowb();
2253
2254 fp->tx_bd_prod += nbd;
2255
2256 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2257 netif_tx_stop_queue(txq);
2258
2259		/* paired memory barrier is in bnx2x_tx_int(); we have to keep
2260		 * the ordering of the set_bit() in netif_tx_stop_queue() and
2261		 * the read of fp->tx_bd_cons */
2262 smp_mb();
2263
2264 fp->eth_q_stats.driver_xoff++;
2265 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2266 netif_tx_wake_queue(txq);
2267 }
2268 fp->tx_pkt++;
2269
2270 return NETDEV_TX_OK;
2271}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002272
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002273/* called with rtnl_lock */
2274int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2275{
2276 struct sockaddr *addr = p;
2277 struct bnx2x *bp = netdev_priv(dev);
2278
2279 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2280 return -EINVAL;
2281
2282 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002283 if (netif_running(dev))
2284 bnx2x_set_eth_mac(bp, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002285
2286 return 0;
2287}
2288
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002289
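/* Request the interrupt resources selected earlier: MSI-X vectors when
 * enabled, otherwise a single MSI or legacy INTx line.
 */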
stephen hemminger8d962862010-10-21 07:50:56 +00002290static int bnx2x_setup_irqs(struct bnx2x *bp)
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002291{
2292 int rc = 0;
2293 if (bp->flags & USING_MSIX_FLAG) {
2294 rc = bnx2x_req_msix_irqs(bp);
2295 if (rc)
2296 return rc;
2297 } else {
2298 bnx2x_ack_int(bp);
2299 rc = bnx2x_req_irq(bp);
2300 if (rc) {
2301 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2302 return rc;
2303 }
2304 if (bp->flags & USING_MSI_FLAG) {
2305 bp->dev->irq = bp->pdev->irq;
2306 netdev_info(bp->dev, "using MSI IRQ %d\n",
2307 bp->pdev->irq);
2308 }
2309 }
2310
2311 return 0;
2312}
2313
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002314void bnx2x_free_mem_bp(struct bnx2x *bp)
2315{
2316 kfree(bp->fp);
2317 kfree(bp->msix_table);
2318 kfree(bp->ilt);
2319}
2320
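/* Allocate the per-device bookkeeping structures: the fastpath array, the
 * MSI-X entry table and the ILT descriptor.
 */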
2321int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2322{
2323 struct bnx2x_fastpath *fp;
2324 struct msix_entry *tbl;
2325 struct bnx2x_ilt *ilt;
2326
2327 /* fp array */
2328 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2329 if (!fp)
2330 goto alloc_err;
2331 bp->fp = fp;
2332
2333 /* msix table */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002334 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002335 GFP_KERNEL);
2336 if (!tbl)
2337 goto alloc_err;
2338 bp->msix_table = tbl;
2339
2340 /* ilt */
2341 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2342 if (!ilt)
2343 goto alloc_err;
2344 bp->ilt = ilt;
2345
2346 return 0;
2347alloc_err:
2348 bnx2x_free_mem_bp(bp);
2349 return -ENOMEM;
2350
2351}
2352
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002353/* called with rtnl_lock */
2354int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2355{
2356 struct bnx2x *bp = netdev_priv(dev);
2357 int rc = 0;
2358
2359 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2360 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2361 return -EAGAIN;
2362 }
2363
2364 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2365 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2366 return -EINVAL;
2367
2368 /* This does not race with packet allocation
2369 * because the actual alloc size is
2370 * only updated as part of load
2371 */
2372 dev->mtu = new_mtu;
2373
2374 if (netif_running(dev)) {
2375 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2376 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2377 }
2378
2379 return rc;
2380}
2381
2382void bnx2x_tx_timeout(struct net_device *dev)
2383{
2384 struct bnx2x *bp = netdev_priv(dev);
2385
2386#ifdef BNX2X_STOP_ON_ERROR
2387 if (!bp->panic)
2388 bnx2x_panic();
2389#endif
2390 /* This allows the netif to be shutdown gracefully before resetting */
2391 schedule_delayed_work(&bp->reset_task, 0);
2392}
2393
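/* PCI PM suspend callback: detach the netdev, unload the NIC and move the
 * device to the power state chosen by the PCI core.
 */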
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002394int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2395{
2396 struct net_device *dev = pci_get_drvdata(pdev);
2397 struct bnx2x *bp;
2398
2399 if (!dev) {
2400 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2401 return -ENODEV;
2402 }
2403 bp = netdev_priv(dev);
2404
2405 rtnl_lock();
2406
2407 pci_save_state(pdev);
2408
2409 if (!netif_running(dev)) {
2410 rtnl_unlock();
2411 return 0;
2412 }
2413
2414 netif_device_detach(dev);
2415
2416 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2417
2418 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2419
2420 rtnl_unlock();
2421
2422 return 0;
2423}
2424
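/* PCI PM resume callback: restore config space, bring the device back to D0,
 * reattach the netdev and reload the NIC.
 */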
2425int bnx2x_resume(struct pci_dev *pdev)
2426{
2427 struct net_device *dev = pci_get_drvdata(pdev);
2428 struct bnx2x *bp;
2429 int rc;
2430
2431 if (!dev) {
2432 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2433 return -ENODEV;
2434 }
2435 bp = netdev_priv(dev);
2436
2437 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2438 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2439 return -EAGAIN;
2440 }
2441
2442 rtnl_lock();
2443
2444 pci_restore_state(pdev);
2445
2446 if (!netif_running(dev)) {
2447 rtnl_unlock();
2448 return 0;
2449 }
2450
2451 bnx2x_set_power_state(bp, PCI_D0);
2452 netif_device_attach(dev);
2453
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002454 /* Since the chip was reset, clear the FW sequence number */
2455 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002456 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2457
2458 rtnl_unlock();
2459
2460 return rc;
2461}