/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"

#include "bnx2x_init.h"

static int bnx2x_setup_irqs(struct bnx2x *bp);

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

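/* bnx2x_tx_int - process TX completions on a fastpath queue: free the skbs
 * of packets the chip reports as sent and, if the queue was stopped by
 * bnx2x_start_xmit(), wake it once enough BDs are available again.
 */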
int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -1;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
				      " pkt_cons %u\n",
		   fp->index, hw_cons, sw_cons, pkt_cons);

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent reenabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
	return 0;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp,
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp,
		le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

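/* bnx2x_tpa_start - begin a TPA aggregation: park the partially filled skb
 * from the consumer BD in the per-queue TPA pool and map a fresh skb from
 * the pool into the producer BD, so the RX ring stays full while the
 * aggregation is in progress.
 */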
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx =
			RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

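/* bnx2x_tpa_stop - complete a TPA aggregation: unmap the pooled skb, fix up
 * the IP header checksum, attach the SGE pages as fragments via
 * bnx2x_fill_frag_skb() and hand the skb to the stack, then refill the TPA
 * pool with a newly allocated skb (or drop the packet if allocation fails).
 */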
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + L1_CACHE_BYTES);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
			if ((le16_to_cpu(cqe->fast_path_cqe.
			    pars_flags.flags) & PARSING_FLAGS_VLAN))
				__vlan_hwaccel_put_tag(skb,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

/* Set Toeplitz hash value in the skb using the value from the
 * CQE (calculated by HW).
 */
static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
					struct sk_buff *skb)
{
	/* Set Toeplitz hash from CQE */
	if ((bp->dev->features & NETIF_F_RXHASH) &&
	    (cqe->fast_path_cqe.status_flags &
	     ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
		skb->rxhash =
			le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
}

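/* bnx2x_rx_int - process up to @budget completions on a fastpath RX
 * completion queue: dispatch slowpath CQEs, drive the TPA start/stop state
 * machine, replenish RX buffers and pass received skbs to the stack via
 * napi_gro_receive(); returns the number of packets processed.
 */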
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			prefetch(skb);
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* - If CQE is marked both TPA_START and TPA_END it is
			 *   a non-TPA CQE.
			 * - FP CQE will always have either TPA_START or/and
			 *   TPA_STOP flags set.
			 */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);

					/* Set Toeplitz hash for an LRO skb */
					bnx2x_set_skb_rxhash(bp, cqe, skb);

					goto next_rx;
				} else { /* TPA_STOP */
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			dma_sync_single_for_device(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						   pad + RX_COPY_THRESH,
						   DMA_FROM_DEVICE);
			prefetch(((char *)(skb)) + L1_CACHE_BYTES);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				dma_unmap_single(&bp->pdev->dev,
					dma_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 DMA_FROM_DEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			/* Set Toeplitz hash for a non-LRO skb */
			bnx2x_set_skb_rxhash(bp, cqe, skb);

			skb_checksum_none_assert(skb);

			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);

		if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN)
			__vlan_hwaccel_put_tag(skb,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		napi_gro_receive(&fp->napi, skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
			 "[fp %d fw_sd %d igusb %d]\n",
	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx and Tx according to MSI-X vector */
	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->sb_running_index[SM_RX_ID]);
	napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	return IRQ_HANDLED;
}

/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

/* calculates MF speed according to current linespeed and MF configuration */
u16 bnx2x_get_mf_speed(struct bnx2x *bp)
{
	u16 line_speed = bp->link_vars.line_speed;
	if (IS_MF(bp)) {
		u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
						FUNC_MF_CFG_MAX_BW_MASK) >>
						FUNC_MF_CFG_MAX_BW_SHIFT;
		/* Calculate the current MAX line speed limit for the DCC
		 * capable devices
		 */
		if (IS_MF_SD(bp)) {
			u16 vn_max_rate = maxCfg * 100;

			if (vn_max_rate < line_speed)
				line_speed = vn_max_rate;
		} else /* IS_MF_SI(bp)) */
			line_speed = (line_speed * maxCfg) / 100;
	}

	return line_speed;
}

void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->flags & MF_FUNC_DIS) {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
		return;
	}

	if (bp->link_vars.link_up) {
		u16 line_speed;

		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC Link is Up, ");

		line_speed = bnx2x_get_mf_speed(bp);

		pr_cont("%d Mbps ", line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			pr_cont("full duplex");
		else
			pr_cont("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					pr_cont("& transmit ");
			} else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC Link is Down\n");
	}
}

/* Returns the number of actually allocated BDs */
static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
				     int rx_ring_size)
{
	struct bnx2x *bp = fp->bp;
	u16 ring_prod, cqe_ring_prod;
	int i;

	fp->rx_comp_cons = 0;
	cqe_ring_prod = ring_prod = 0;
	for (i = 0; i < rx_ring_size; i++) {
		if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
			BNX2X_ERR("was only able to allocate "
				  "%d rx skbs on queue[%d]\n", i, fp->index);
			fp->eth_q_stats.rx_skb_alloc_failed++;
			break;
		}
		ring_prod = NEXT_RX_IDX(ring_prod);
		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
		WARN_ON(ring_prod <= i);
	}

	fp->rx_bd_prod = ring_prod;
	/* Limit the CQE producer by the CQE ring size */
	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
			       cqe_ring_prod);
	fp->rx_pkt = fp->rx_calls = 0;

	return i;
}

static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
					      MAX_RX_AVAIL/bp->num_queues;

	rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);

	bnx2x_alloc_rx_bds(fp, rx_ring_size);

	/* Warning!
	 * this will generate an interrupt (to the TSTORM)
	 * must only be done after chip is initialized
	 */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
}

void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
		IP_HEADER_ALIGNMENT_PADDING;

	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		if (!fp->disable_tpa) {
			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}

			/* "next page" elements initialization */
			bnx2x_set_next_page_sgl(fp);

			/* set SGEs bit mask */
			bnx2x_init_sge_ring_bit_mask(fp);

			/* Allocate SGEs and initialize the ring elements */
			for (i = 0, ring_prod = 0;
			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
					BNX2X_ERR("was only able to allocate "
						  "%d rx sges\n", i);
					BNX2X_ERR("disabling TPA for"
						  " queue[%d]\n", j);
					/* Cleanup already allocated elements */
					bnx2x_free_rx_sge_range(bp,
								fp, ring_prod);
					bnx2x_free_tpa_pool(bp,
							    fp, max_agg_queues);
					fp->disable_tpa = 1;
					ring_prod = 0;
					break;
				}
				ring_prod = NEXT_SGE_IDX(ring_prod);
			}

			fp->rx_sge_prod = ring_prod;
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;

		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* Allocate BDs and initialize BD ring */
		bnx2x_alloc_rx_bd_ring(fp);

		if (j != 0)
			continue;

		if (!CHIP_IS_E2(bp)) {
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
			       U64_LO(fp->rx_comp_mapping));
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
			       U64_HI(fp->rx_comp_mapping));
		}
	}
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG)
		bnx2x_free_msix_irqs(bp);
	else if (bp->flags & USING_MSI_FLAG)
		free_irq(bp->pdev->irq, bp->dev);
	else
		free_irq(bp->pdev->irq, bp->dev);
}

int bnx2x_enable_msix(struct bnx2x *bp)
{
	int msix_vec = 0, i, rc, req_cnt;

	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
	   bp->msix_table[0].entry);
	msix_vec++;

#ifdef BCM_CNIC
	bp->msix_table[msix_vec].entry = msix_vec;
	DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
	   bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
	msix_vec++;
#endif
	for_each_eth_queue(bp, i) {
		bp->msix_table[msix_vec].entry = msix_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", msix_vec, msix_vec, i);
		msix_vec++;
	}

	req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);

	/*
	 * reconfigure number of tx/rx queues according to available
	 * MSI-X vectors
	 */
	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
		int diff = req_cnt - rc;

		DP(NETIF_MSG_IFUP,
		   "Trying to use less MSI-X vectors: %d\n", rc);

		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);

		if (rc) {
			DP(NETIF_MSG_IFUP,
			   "MSI-X is not attainable rc %d\n", rc);
			return rc;
		}
		/*
		 * decrease number of queues by number of unallocated entries
		 */
		bp->num_queues -= diff;

		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
				  bp->num_queues);
	} else if (rc) {
		/* fall back to INTx if not enough memory */
		if (rc == -ENOMEM)
			bp->flags |= DISABLE_MSI_FLAG;
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}

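/* bnx2x_req_msix_irqs - request the slowpath MSI-X vector and one vector per
 * ethernet fastpath queue (skipping the CNIC slot when BCM_CNIC is set);
 * on failure all previously requested vectors are released.
 */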
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		offset++;
		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_ETH_QUEUES(bp);
	offset = 1 + CNIC_CONTEXT_USE;
	netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
		    " ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_napi_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

void bnx2x_set_num_queues(struct bnx2x *bp)
{
	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;
	case ETH_RSS_MODE_REGULAR:
		bp->num_queues = bnx2x_calc_num_queues(bp);
		break;

	default:
		bp->num_queues = 1;
		break;
	}

	/* Add special queues */
	bp->num_queues += NONE_ETH_CONTEXT_USE;
}

#ifdef BCM_CNIC
static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
{
	if (!NO_FCOE(bp)) {
		if (!IS_MF_SD(bp))
			bnx2x_set_fip_eth_mac_addr(bp, 1);
		bnx2x_set_all_enode_macs(bp, 1);
		bp->flags |= FCOE_MACS_SET;
	}
}
#endif

static void bnx2x_release_firmware(struct bnx2x *bp)
{
	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);
}

static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
{
	int rc, num = bp->num_queues;

#ifdef BCM_CNIC
	if (NO_FCOE(bp))
		num -= FCOE_CONTEXT_USE;

#endif
	netif_set_real_num_tx_queues(bp->dev, num);
	rc = netif_set_real_num_rx_queues(bp->dev, num);
	return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp);
	if (rc) {
		BNX2X_ERR("Error loading firmware\n");
		return rc;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	/* must be called before memory allocation and HW init */
	bnx2x_ilt_set_info(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	rc = bnx2x_set_real_num_queues(bp);
	if (rc) {
		BNX2X_ERR("Unable to set real_num_queues\n");
		goto load_error0;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

#ifdef BCM_CNIC
	/* We don't want TPA on FCoE L2 ring */
	bnx2x_fcoe(bp, disable_tpa) = 1;
#endif
	bnx2x_napi_enable(bp);

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error1;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error1;
		}

	} else {
		int path = BP_PATH(bp);
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		load_count[path][0]++;
		load_count[path][1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
		   path, load_count[path][0], load_count[path][1],
		   load_count[path][2]);
		if (load_count[path][0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[path][1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Connect to IRQs */
	rc = bnx2x_setup_irqs(bp);
	if (rc) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_func_start(bp);
	if (rc) {
		BNX2X_ERR("Function start failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (!CHIP_IS_E1(bp) &&
	    (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
		DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
		bp->flags |= MF_FUNC_DIS;
	}

#ifdef BCM_CNIC
	/* Enable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif

	for_each_nondefault_queue(bp, i) {
		rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
		if (rc)
#ifdef BCM_CNIC
			goto load_error4;
#else
			goto load_error3;
#endif
	}

	/* Now when Clients are configured we are ready to work */
	bp->state = BNX2X_STATE_OPEN;

#ifdef BCM_CNIC
	bnx2x_set_fcoe_eth_macs(bp);
#endif

	bnx2x_set_eth_mac(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		smp_mb__after_clear_bit();
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	bnx2x_release_firmware(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error2:
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
	}

	bp->port.pmf = 0;
load_error1:
	bnx2x_napi_disable(bp);
load_error0:
	bnx2x_free_mem(bp);

	bnx2x_release_firmware(bp);

	return rc;
}

/* must be called with rtnl_lock */
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Stop Tx */
	bnx2x_tx_disable(bp);

	del_timer_sync(&bp->timer);

	SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));

	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);
	else {
		/* Disable HW interrupts, NAPI and Tx */
		bnx2x_netif_stop(bp, 1);

		/* Release IRQs */
		bnx2x_free_irq(bp);
	}

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	/* The last driver must disable a "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset MCP mailbox sequence if there is ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}

int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	/* If there is no power capability, silently succeed */
	if (!bp->pm_cap) {
		DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
		return 0;
	}

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * net_device service functions
 */
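/* bnx2x_poll - NAPI poll handler: drain TX completions, process RX up to the
 * given budget and re-enable the queue's IGU interrupt only once both rings
 * are empty and the status block index has been read back.
 */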
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001602int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001603{
1604 int work_done = 0;
1605 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1606 napi);
1607 struct bnx2x *bp = fp->bp;
1608
1609 while (1) {
1610#ifdef BNX2X_STOP_ON_ERROR
1611 if (unlikely(bp->panic)) {
1612 napi_complete(napi);
1613 return 0;
1614 }
1615#endif
1616
1617 if (bnx2x_has_tx_work(fp))
1618 bnx2x_tx_int(fp);
1619
1620 if (bnx2x_has_rx_work(fp)) {
1621 work_done += bnx2x_rx_int(fp, budget - work_done);
1622
1623 /* must not complete if we consumed full budget */
1624 if (work_done >= budget)
1625 break;
1626 }
1627
1628 /* Fall out from the NAPI loop if needed */
1629 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001630#ifdef BCM_CNIC
1631 /* No need to update SB for FCoE L2 ring as long as
1632 * it's connected to the default SB and the SB
1633 * has been updated when NAPI was scheduled.
1634 */
1635 if (IS_FCOE_FP(fp)) {
1636 napi_complete(napi);
1637 break;
1638 }
1639#endif
1640
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001641 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001642 /* bnx2x_has_rx_work() reads the status block,
1643 * thus we need to ensure that status block indices
1644 * have been actually read (bnx2x_update_fpsb_idx)
1645 * prior to this check (bnx2x_has_rx_work) so that
1646 * we won't write the "newer" value of the status block
1647 * to IGU (if there was a DMA right after
1648 * bnx2x_has_rx_work and if there is no rmb, the memory
1649 * reading (bnx2x_update_fpsb_idx) may be postponed
1650 * to right before bnx2x_ack_sb). In this case there
1651 * will never be another interrupt until there is
1652 * another update of the status block, while there
1653 * is still unhandled work.
1654 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001655 rmb();
1656
1657 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1658 napi_complete(napi);
1659 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001660 DP(NETIF_MSG_HW,
1661 "Update index to %d\n", fp->fp_hc_idx);
1662 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1663 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001664 IGU_INT_ENABLE, 1);
1665 break;
1666 }
1667 }
1668 }
1669
1670 return work_done;
1671}
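
/* In short, the poll loop above always drains TX completions, processes RX
 * up to the NAPI budget, and only leaves polling mode after the fastpath
 * status block index has been re-read (bnx2x_update_fpsb_idx() plus rmb())
 * and the work check has been repeated, so that an index is never acked to
 * the IGU while work it announces is still unhandled.
 */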
1672
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001673/* we split the first BD into headers and data BDs
 1674 * to ease the pain of our fellow microcode engineers;
 1675 * we use one mapping for both BDs.
1676 * So far this has only been observed to happen
1677 * in Other Operating Systems(TM)
1678 */
1679static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1680 struct bnx2x_fastpath *fp,
1681 struct sw_tx_bd *tx_buf,
1682 struct eth_tx_start_bd **tx_bd, u16 hlen,
1683 u16 bd_prod, int nbd)
1684{
1685 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1686 struct eth_tx_bd *d_tx_bd;
1687 dma_addr_t mapping;
1688 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1689
1690 /* first fix first BD */
1691 h_tx_bd->nbd = cpu_to_le16(nbd);
1692 h_tx_bd->nbytes = cpu_to_le16(hlen);
1693
1694 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1695 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1696 h_tx_bd->addr_lo, h_tx_bd->nbd);
1697
1698 /* now get a new data BD
1699 * (after the pbd) and fill it */
1700 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1701 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1702
1703 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1704 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1705
1706 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1707 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1708 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1709
1710 /* this marks the BD as one that has no individual mapping */
1711 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1712
1713 DP(NETIF_MSG_TX_QUEUED,
1714 "TSO split data size is %d (%x:%x)\n",
1715 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1716
1717 /* update tx_bd */
1718 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1719
1720 return bd_prod;
1721}
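
/* Worked example (illustrative numbers only): for a TSO skb whose linear
 * part holds 54 bytes of headers followed by 1346 bytes of payload, the
 * start BD above is trimmed to nbytes = hlen = 54 and a new data BD is
 * chained that reuses the same DMA mapping at offset 54 with
 * nbytes = 1400 - 54 = 1346.  No second dma_map call is made, which is why
 * the buffer is flagged BNX2X_TSO_SPLIT_BD (no individual mapping to unmap
 * later).
 */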
1722
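/* bnx2x_csum_fix() below adjusts a partial checksum whose summation did not
 * start exactly at t_header: a positive fix subtracts the contribution of
 * the "fix" extra bytes that were summed before t_header, a negative fix
 * adds back the "-fix" bytes that were missed, and the result is folded,
 * inverted and byte-swapped for the parsing BD.
 */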
1723static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1724{
1725 if (fix > 0)
1726 csum = (u16) ~csum_fold(csum_sub(csum,
1727 csum_partial(t_header - fix, fix, 0)));
1728
1729 else if (fix < 0)
1730 csum = (u16) ~csum_fold(csum_add(csum,
1731 csum_partial(t_header, -fix, 0)));
1732
1733 return swab16(csum);
1734}
1735
1736static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1737{
1738 u32 rc;
1739
1740 if (skb->ip_summed != CHECKSUM_PARTIAL)
1741 rc = XMIT_PLAIN;
1742
1743 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00001744 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001745 rc = XMIT_CSUM_V6;
1746 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1747 rc |= XMIT_CSUM_TCP;
1748
1749 } else {
1750 rc = XMIT_CSUM_V4;
1751 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1752 rc |= XMIT_CSUM_TCP;
1753 }
1754 }
1755
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00001756 if (skb_is_gso_v6(skb))
1757 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1758 else if (skb_is_gso(skb))
1759 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001760
1761 return rc;
1762}
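
/* In short, bnx2x_xmit_type() yields XMIT_PLAIN when no checksum offload
 * was requested, XMIT_CSUM_V4 or XMIT_CSUM_V6 (plus XMIT_CSUM_TCP for TCP)
 * for CHECKSUM_PARTIAL packets, and additionally XMIT_GSO_V4 or XMIT_GSO_V6
 * for GSO packets, which always imply TCP checksum offload as well.
 */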
1763
1764#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1765/* check if packet requires linearization (packet is too fragmented)
1766 no need to check fragmentation if page size > 8K (there will be no
 1767 violation of FW restrictions) */
1768static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1769 u32 xmit_type)
1770{
1771 int to_copy = 0;
1772 int hlen = 0;
1773 int first_bd_sz = 0;
1774
1775 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1776 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1777
1778 if (xmit_type & XMIT_GSO) {
1779 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1780 /* Check if LSO packet needs to be copied:
1781 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1782 int wnd_size = MAX_FETCH_BD - 3;
1783 /* Number of windows to check */
1784 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1785 int wnd_idx = 0;
1786 int frag_idx = 0;
1787 u32 wnd_sum = 0;
1788
1789 /* Headers length */
1790 hlen = (int)(skb_transport_header(skb) - skb->data) +
1791 tcp_hdrlen(skb);
1792
 1793 /* Amount of data (w/o headers) on linear part of SKB */
1794 first_bd_sz = skb_headlen(skb) - hlen;
1795
1796 wnd_sum = first_bd_sz;
1797
1798 /* Calculate the first sum - it's special */
1799 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1800 wnd_sum +=
1801 skb_shinfo(skb)->frags[frag_idx].size;
1802
 1803 /* If there was data in the linear part of the skb - check it */
1804 if (first_bd_sz > 0) {
1805 if (unlikely(wnd_sum < lso_mss)) {
1806 to_copy = 1;
1807 goto exit_lbl;
1808 }
1809
1810 wnd_sum -= first_bd_sz;
1811 }
1812
1813 /* Others are easier: run through the frag list and
1814 check all windows */
1815 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1816 wnd_sum +=
1817 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1818
1819 if (unlikely(wnd_sum < lso_mss)) {
1820 to_copy = 1;
1821 break;
1822 }
1823 wnd_sum -=
1824 skb_shinfo(skb)->frags[wnd_idx].size;
1825 }
1826 } else {
 1827 /* a non-LSO packet that is too fragmented
 1828 should always be linearized */
1829 to_copy = 1;
1830 }
1831 }
1832
1833exit_lbl:
1834 if (unlikely(to_copy))
1835 DP(NETIF_MSG_TX_QUEUED,
1836 "Linearization IS REQUIRED for %s packet. "
1837 "num_frags %d hlen %d first_bd_sz %d\n",
1838 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1839 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1840
1841 return to_copy;
1842}
1843#endif
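
/* Illustrative sketch (not driver code) of the sliding-window rule that
 * bnx2x_pkt_req_lin() enforces for LSO packets: every window of wnd_size
 * (i.e. MAX_FETCH_BD - 3) consecutive BDs must hold at least lso_mss bytes,
 * otherwise a single MSS worth of payload could span more BDs than the FW
 * can fetch and the skb has to be linearized.  This is a simplified,
 * array-based stand-in for the frag-list walk above (the real code also
 * folds the linear part of the skb into the first window); the helper name
 * and parameters are hypothetical.
 */
static inline int example_window_needs_linearization(const u32 *bd_len,
						     int num_bds,
						     int wnd_size,
						     u32 lso_mss)
{
	u32 wnd_sum = 0;
	int i;

	for (i = 0; i < num_bds; i++) {
		wnd_sum += bd_len[i];		/* BD enters the window */
		if (i >= wnd_size)
			wnd_sum -= bd_len[i - wnd_size]; /* BD leaves it */
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return 1;		/* too fragmented - linearize */
	}
	return 0;
}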
1844
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001845static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
1846 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001847{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001848 *parsing_data |= (skb_shinfo(skb)->gso_size <<
1849 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
1850 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001851 if ((xmit_type & XMIT_GSO_V6) &&
1852 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001853 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001854}
1855
 1856/**
 1857 * Update the parsing BD (PBD) in the GSO case.
 1858 *
 1859 * @param skb packet being transmitted
 1860 * @param pbd E1x parsing BD to fill
 1861 * @param xmit_type XMIT_* flags, as returned by bnx2x_xmit_type()
 1862 */
1864static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1865 struct eth_tx_parse_bd_e1x *pbd,
1866 u32 xmit_type)
1867{
1868 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1869 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1870 pbd->tcp_flags = pbd_tcp_flags(skb);
1871
1872 if (xmit_type & XMIT_GSO_V4) {
1873 pbd->ip_id = swab16(ip_hdr(skb)->id);
1874 pbd->tcp_pseudo_csum =
1875 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1876 ip_hdr(skb)->daddr,
1877 0, IPPROTO_TCP, 0));
1878
1879 } else
1880 pbd->tcp_pseudo_csum =
1881 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1882 &ipv6_hdr(skb)->daddr,
1883 0, IPPROTO_TCP, 0));
1884
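	/* The pseudo checksum above is seeded with a zero length, leaving
	 * the per-segment length to be added downstream for each TSO
	 * segment; the PSEUDO_CS_WITHOUT_LEN flag set below reflects that.
	 */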
1885 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1886}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001887
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001888/**
 1889 * Set up the E2 parsing data for checksum offload.
 1890 *
 1891 * @param bp driver handle
 1892 * @param skb packet being transmitted
 1893 * @param parsing_data E2 parsing data to update
 1894 * @param xmit_type XMIT_* flags, as returned by bnx2x_xmit_type()
 1895 *
 1896 * @return header length: offset just past the TCP header, in bytes
 1897 */
1897static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001898 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001899{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001900 *parsing_data |= ((tcp_hdrlen(skb)/4) <<
1901 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
1902 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001903
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001904 *parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
1905 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
1906 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001907
1908 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1909}
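
/* Note on units in bnx2x_set_pbd_csum_e2(): the TCP header length is
 * programmed in 32-bit dwords (tcp_hdrlen(skb) / 4) while the TCP header
 * start offset is programmed in 16-bit words (byte offset / 2), matching
 * the _DW and _W suffixes of the corresponding field masks.
 */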
1910
 1911/**
 1912 * Set up the E1x parsing BD for checksum offload.
 1913 *
 1914 * @param bp driver handle
 1915 * @param skb packet being transmitted
 1916 * @param pbd E1x parsing BD to fill
 1917 * @param xmit_type XMIT_* flags, as returned by bnx2x_xmit_type()
 1918 *
 1919 * @return header length (Ethernet + IP + TCP headers), in bytes
 1920 */
1920static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1921 struct eth_tx_parse_bd_e1x *pbd,
1922 u32 xmit_type)
1923{
1924 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1925
1926 /* for now NS flag is not used in Linux */
1927 pbd->global_data =
1928 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1929 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1930
1931 pbd->ip_hlen_w = (skb_transport_header(skb) -
1932 skb_network_header(skb)) / 2;
1933
1934 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1935
1936 pbd->total_hlen_w = cpu_to_le16(hlen);
1937 hlen = hlen*2;
1938
1939 if (xmit_type & XMIT_CSUM_TCP) {
1940 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1941
1942 } else {
1943 s8 fix = SKB_CS_OFF(skb); /* signed! */
1944
1945 DP(NETIF_MSG_TX_QUEUED,
1946 "hlen %d fix %d csum before fix %x\n",
1947 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1948
1949 /* HW bug: fixup the CSUM */
1950 pbd->tcp_pseudo_csum =
1951 bnx2x_csum_fix(skb_transport_header(skb),
1952 SKB_CS(skb), fix);
1953
1954 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1955 pbd->tcp_pseudo_csum);
1956 }
1957
1958 return hlen;
1959}
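
/* Worked example (illustrative numbers only) for bnx2x_set_pbd_csum(): for
 * an untagged IPv4 TCP packet with no IP or TCP options, the Ethernet
 * header is 14 bytes = 7 words, the IP header 20 bytes = 10 words and the
 * TCP header 20 bytes = 10 words, so pbd->ip_hlen_w = 10,
 * pbd->total_hlen_w = 7 + 10 + 10 = 27 and the function returns
 * 27 * 2 = 54 bytes.
 */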
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001960
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001961/* called with netif_tx_lock
1962 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1963 * netif_wake_queue()
1964 */
1965netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1966{
1967 struct bnx2x *bp = netdev_priv(dev);
1968 struct bnx2x_fastpath *fp;
1969 struct netdev_queue *txq;
1970 struct sw_tx_bd *tx_buf;
1971 struct eth_tx_start_bd *tx_start_bd;
1972 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001973 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001974 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00001975 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001976 u16 pkt_prod, bd_prod;
1977 int nbd, fp_index;
1978 dma_addr_t mapping;
1979 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1980 int i;
1981 u8 hlen = 0;
1982 __le16 pkt_size = 0;
1983 struct ethhdr *eth;
1984 u8 mac_type = UNICAST_ADDRESS;
1985
1986#ifdef BNX2X_STOP_ON_ERROR
1987 if (unlikely(bp->panic))
1988 return NETDEV_TX_BUSY;
1989#endif
1990
1991 fp_index = skb_get_queue_mapping(skb);
1992 txq = netdev_get_tx_queue(dev, fp_index);
1993
1994 fp = &bp->fp[fp_index];
1995
1996 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1997 fp->eth_q_stats.driver_xoff++;
1998 netif_tx_stop_queue(txq);
1999 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2000 return NETDEV_TX_BUSY;
2001 }
2002
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002003 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2004 "protocol(%x,%x) gso type %x xmit_type %x\n",
2005 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002006 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2007
2008 eth = (struct ethhdr *)skb->data;
2009
 2010 /* set flag according to packet type (UNICAST_ADDRESS is default) */
2011 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2012 if (is_broadcast_ether_addr(eth->h_dest))
2013 mac_type = BROADCAST_ADDRESS;
2014 else
2015 mac_type = MULTICAST_ADDRESS;
2016 }
2017
2018#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2019 /* First, check if we need to linearize the skb (due to FW
2020 restrictions). No need to check fragmentation if page size > 8K
 2021 (there will be no violation of FW restrictions) */
2022 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2023 /* Statistics of linearization */
2024 bp->lin_cnt++;
2025 if (skb_linearize(skb) != 0) {
2026 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2027 "silently dropping this SKB\n");
2028 dev_kfree_skb_any(skb);
2029 return NETDEV_TX_OK;
2030 }
2031 }
2032#endif
2033
2034 /*
2035 Please read carefully. First we use one BD which we mark as start,
2036 then we have a parsing info BD (used for TSO or xsum),
2037 and only then we have the rest of the TSO BDs.
2038 (don't forget to mark the last one as last,
2039 and to unmap only AFTER you write to the BD ...)
 2040 And above all, all PBD sizes are in words - NOT DWORDS!
2041 */
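	/* Illustrative layout of the resulting BD chain for a TSO packet:
	 *
	 *   start BD (headers) -> parsing BD (E1x or E2)
	 *     -> split-header data BD (rest of the linear data, if any)
	 *     -> one data BD per page fragment
	 *
	 * nbd counts the start BD, the parsing BD and every data BD
	 * (one more is added below when the header is split off).
	 */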
2042
2043 pkt_prod = fp->tx_pkt_prod++;
2044 bd_prod = TX_BD(fp->tx_bd_prod);
2045
2046 /* get a tx_buf and first BD */
2047 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2048 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2049
2050 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002051 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2052 mac_type);
2053
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002054 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002055 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002056
2057 /* remember the first BD of the packet */
2058 tx_buf->first_bd = fp->tx_bd_prod;
2059 tx_buf->skb = skb;
2060 tx_buf->flags = 0;
2061
2062 DP(NETIF_MSG_TX_QUEUED,
2063 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2064 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2065
Jesse Grosseab6d182010-10-20 13:56:03 +00002066 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002067 tx_start_bd->vlan_or_ethertype =
2068 cpu_to_le16(vlan_tx_tag_get(skb));
2069 tx_start_bd->bd_flags.as_bitfield |=
2070 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002071 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002072 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002073
2074 /* turn on parsing and get a BD */
2075 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002076
2077 if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002078 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2079
2080 if (xmit_type & XMIT_CSUM_V4)
2081 tx_start_bd->bd_flags.as_bitfield |=
2082 ETH_TX_BD_FLAGS_IP_CSUM;
2083 else
2084 tx_start_bd->bd_flags.as_bitfield |=
2085 ETH_TX_BD_FLAGS_IPV6;
2086
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002087 if (!(xmit_type & XMIT_CSUM_TCP))
2088 tx_start_bd->bd_flags.as_bitfield |=
2089 ETH_TX_BD_FLAGS_IS_UDP;
2090 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002091
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002092 if (CHIP_IS_E2(bp)) {
2093 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2094 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2095 /* Set PBD in checksum offload case */
2096 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002097 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2098 &pbd_e2_parsing_data,
2099 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002100 } else {
2101 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2102 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2103 /* Set PBD in checksum offload case */
2104 if (xmit_type & XMIT_CSUM)
2105 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002106
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002107 }
2108
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002109 /* Map skb linear data for DMA */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002110 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2111 skb_headlen(skb), DMA_TO_DEVICE);
2112
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002113 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002114 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2115 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2116 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2117 tx_start_bd->nbd = cpu_to_le16(nbd);
2118 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2119 pkt_size = tx_start_bd->nbytes;
2120
2121 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2122 " nbytes %d flags %x vlan %x\n",
2123 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2124 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002125 tx_start_bd->bd_flags.as_bitfield,
2126 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002127
2128 if (xmit_type & XMIT_GSO) {
2129
2130 DP(NETIF_MSG_TX_QUEUED,
2131 "TSO packet len %d hlen %d total len %d tso size %d\n",
2132 skb->len, hlen, skb_headlen(skb),
2133 skb_shinfo(skb)->gso_size);
2134
2135 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2136
2137 if (unlikely(skb_headlen(skb) > hlen))
2138 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2139 hlen, bd_prod, ++nbd);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002140 if (CHIP_IS_E2(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002141 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2142 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002143 else
2144 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002145 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002146
2147 /* Set the PBD's parsing_data field if not zero
2148 * (for the chips newer than 57711).
2149 */
2150 if (pbd_e2_parsing_data)
2151 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2152
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002153 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2154
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002155 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002156 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2157 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2158
2159 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2160 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2161 if (total_pkt_bd == NULL)
2162 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2163
2164 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2165 frag->page_offset,
2166 frag->size, DMA_TO_DEVICE);
2167
2168 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2169 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2170 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2171 le16_add_cpu(&pkt_size, frag->size);
2172
2173 DP(NETIF_MSG_TX_QUEUED,
2174 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2175 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2176 le16_to_cpu(tx_data_bd->nbytes));
2177 }
2178
2179 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2180
2181 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2182
2183 /* now send a tx doorbell, counting the next BD
2184 * if the packet contains or ends with it
2185 */
2186 if (TX_BD_POFF(bd_prod) < nbd)
2187 nbd++;
2188
2189 if (total_pkt_bd != NULL)
2190 total_pkt_bd->total_pkt_bytes = pkt_size;
2191
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002192 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002193 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002194 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002195 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002196 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2197 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2198 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2199 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002200 if (pbd_e2)
2201 DP(NETIF_MSG_TX_QUEUED,
2202 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2203 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2204 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2205 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2206 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002207 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2208
2209 /*
2210 * Make sure that the BD data is updated before updating the producer
2211 * since FW might read the BD right after the producer is updated.
2212 * This is only applicable for weak-ordered memory model archs such
 2213 * as IA-64. The following barrier is also mandatory since the FW
 2214 * assumes packets always have BDs.
2215 */
2216 wmb();
2217
2218 fp->tx_db.data.prod += nbd;
2219 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002220
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002221 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002222
2223 mmiowb();
2224
2225 fp->tx_bd_prod += nbd;
2226
2227 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2228 netif_tx_stop_queue(txq);
2229
 2230 /* The paired memory barrier is in bnx2x_tx_int(); we have to keep
 2231 * the ordering between set_bit() in netif_tx_stop_queue() and the
 2232 * read of fp->tx_bd_cons */
2233 smp_mb();
2234
2235 fp->eth_q_stats.driver_xoff++;
2236 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2237 netif_tx_wake_queue(txq);
2238 }
2239 fp->tx_pkt++;
2240
2241 return NETDEV_TX_OK;
2242}
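
/* Note on the "+ 3" headroom used above: the worst case for one packet is
 * the start BD, the parsing BD, a possible TSO split-header data BD and one
 * data BD per fragment, i.e. nr_frags + 3 BDs, which is what both the
 * availability check at the top of the function and the queue stop/wake
 * thresholds account for.
 */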
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002243
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002244/* called with rtnl_lock */
2245int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2246{
2247 struct sockaddr *addr = p;
2248 struct bnx2x *bp = netdev_priv(dev);
2249
2250 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2251 return -EINVAL;
2252
2253 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002254 if (netif_running(dev))
2255 bnx2x_set_eth_mac(bp, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002256
2257 return 0;
2258}
2259
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002260
stephen hemminger8d962862010-10-21 07:50:56 +00002261static int bnx2x_setup_irqs(struct bnx2x *bp)
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002262{
2263 int rc = 0;
2264 if (bp->flags & USING_MSIX_FLAG) {
2265 rc = bnx2x_req_msix_irqs(bp);
2266 if (rc)
2267 return rc;
2268 } else {
2269 bnx2x_ack_int(bp);
2270 rc = bnx2x_req_irq(bp);
2271 if (rc) {
2272 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2273 return rc;
2274 }
2275 if (bp->flags & USING_MSI_FLAG) {
2276 bp->dev->irq = bp->pdev->irq;
2277 netdev_info(bp->dev, "using MSI IRQ %d\n",
2278 bp->pdev->irq);
2279 }
2280 }
2281
2282 return 0;
2283}
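
/* bnx2x_setup_irqs() only requests the IRQ(s) for whichever interrupt mode
 * the USING_MSIX_FLAG/USING_MSI_FLAG bits already indicate (the MSI-X
 * vector set, or a single MSI/INTx line); it does not fall back to another
 * mode on failure, it simply returns the error to the caller.
 */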
2284
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002285void bnx2x_free_mem_bp(struct bnx2x *bp)
2286{
2287 kfree(bp->fp);
2288 kfree(bp->msix_table);
2289 kfree(bp->ilt);
2290}
2291
2292int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2293{
2294 struct bnx2x_fastpath *fp;
2295 struct msix_entry *tbl;
2296 struct bnx2x_ilt *ilt;
2297
2298 /* fp array */
2299 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2300 if (!fp)
2301 goto alloc_err;
2302 bp->fp = fp;
2303
2304 /* msix table */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002305 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002306 GFP_KERNEL);
2307 if (!tbl)
2308 goto alloc_err;
2309 bp->msix_table = tbl;
2310
2311 /* ilt */
2312 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2313 if (!ilt)
2314 goto alloc_err;
2315 bp->ilt = ilt;
2316
2317 return 0;
2318alloc_err:
2319 bnx2x_free_mem_bp(bp);
2320 return -ENOMEM;
2321
2322}
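
/* Usage note: bnx2x_alloc_mem_bp() either allocates all three arrays (fp,
 * msix_table, ilt) or frees whatever it already allocated via
 * bnx2x_free_mem_bp() and returns -ENOMEM, so a caller needs only a single
 * error check and a matching bnx2x_free_mem_bp() on the teardown path.
 */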
2323
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002324/* called with rtnl_lock */
2325int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2326{
2327 struct bnx2x *bp = netdev_priv(dev);
2328 int rc = 0;
2329
2330 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2331 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2332 return -EAGAIN;
2333 }
2334
2335 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2336 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2337 return -EINVAL;
2338
2339 /* This does not race with packet allocation
2340 * because the actual alloc size is
2341 * only updated as part of load
2342 */
2343 dev->mtu = new_mtu;
2344
2345 if (netif_running(dev)) {
2346 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2347 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2348 }
2349
2350 return rc;
2351}
2352
2353void bnx2x_tx_timeout(struct net_device *dev)
2354{
2355 struct bnx2x *bp = netdev_priv(dev);
2356
2357#ifdef BNX2X_STOP_ON_ERROR
2358 if (!bp->panic)
2359 bnx2x_panic();
2360#endif
 2361 /* This allows the netif to be shut down gracefully before resetting */
2362 schedule_delayed_work(&bp->reset_task, 0);
2363}
2364
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002365int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2366{
2367 struct net_device *dev = pci_get_drvdata(pdev);
2368 struct bnx2x *bp;
2369
2370 if (!dev) {
2371 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2372 return -ENODEV;
2373 }
2374 bp = netdev_priv(dev);
2375
2376 rtnl_lock();
2377
2378 pci_save_state(pdev);
2379
2380 if (!netif_running(dev)) {
2381 rtnl_unlock();
2382 return 0;
2383 }
2384
2385 netif_device_detach(dev);
2386
2387 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2388
2389 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2390
2391 rtnl_unlock();
2392
2393 return 0;
2394}
2395
2396int bnx2x_resume(struct pci_dev *pdev)
2397{
2398 struct net_device *dev = pci_get_drvdata(pdev);
2399 struct bnx2x *bp;
2400 int rc;
2401
2402 if (!dev) {
2403 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2404 return -ENODEV;
2405 }
2406 bp = netdev_priv(dev);
2407
2408 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2409 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2410 return -EAGAIN;
2411 }
2412
2413 rtnl_lock();
2414
2415 pci_restore_state(pdev);
2416
2417 if (!netif_running(dev)) {
2418 rtnl_unlock();
2419 return 0;
2420 }
2421
2422 bnx2x_set_power_state(bp, PCI_D0);
2423 netif_device_attach(dev);
2424
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002425 /* Since the chip was reset, clear the FW sequence number */
2426 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002427 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2428
2429 rtnl_unlock();
2430
2431 return rc;
2432}