 1/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
3 * Copyright (c) 2007-2010 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
 18#include <linux/etherdevice.h>
 19#include <linux/if_vlan.h>
 20#include <linux/ip.h>
 21#include <net/ipv6.h>
 22#include <net/ip6_checksum.h>
 23#include <linux/firmware.h>
 24#include "bnx2x_cmn.h"
 25
 26#include "bnx2x_init.h"
 27
 28
29/* free skb in the packet ring at pos idx
30 * return idx of last bd freed
31 */
32static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
33 u16 idx)
34{
35 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
36 struct eth_tx_start_bd *tx_start_bd;
37 struct eth_tx_bd *tx_data_bd;
38 struct sk_buff *skb = tx_buf->skb;
39 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
40 int nbd;
41
42 /* prefetch skb end pointer to speedup dev_kfree_skb() */
43 prefetch(&skb->end);
44
45 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
46 idx, tx_buf, skb);
47
48 /* unmap first bd */
49 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
50 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
51 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
 52 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 53
54 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
55#ifdef BNX2X_STOP_ON_ERROR
56 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
57 BNX2X_ERR("BAD nbd!\n");
58 bnx2x_panic();
59 }
60#endif
61 new_cons = nbd + tx_buf->first_bd;
62
63 /* Get the next bd */
64 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
65
66 /* Skip a parse bd... */
67 --nbd;
68 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
69
70 /* ...and the TSO split header bd since they have no mapping */
71 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
72 --nbd;
73 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
74 }
75
76 /* now free frags */
77 while (nbd > 0) {
78
79 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
80 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
81 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
82 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
83 if (--nbd)
84 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
85 }
86
87 /* release skb */
88 WARN_ON(!skb);
89 dev_kfree_skb(skb);
90 tx_buf->first_bd = 0;
91 tx_buf->skb = NULL;
92
93 return new_cons;
94}
95
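/* Tx completion: walk the ring from the driver's consumer index to the
 * hardware consumer index in the status block, free each completed packet,
 * and re-enable the netdev queue if it was stopped and enough BDs are free.
 */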
96int bnx2x_tx_int(struct bnx2x_fastpath *fp)
97{
98 struct bnx2x *bp = fp->bp;
99 struct netdev_queue *txq;
100 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
101
102#ifdef BNX2X_STOP_ON_ERROR
103 if (unlikely(bp->panic))
104 return -1;
105#endif
106
107 txq = netdev_get_tx_queue(bp->dev, fp->index);
108 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
109 sw_cons = fp->tx_pkt_cons;
110
111 while (sw_cons != hw_cons) {
112 u16 pkt_cons;
113
114 pkt_cons = TX_BD(sw_cons);
115
 116 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
 117 " pkt_cons %u\n",
 118 fp->index, hw_cons, sw_cons, pkt_cons);
 119
 120 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
121 sw_cons++;
122 }
123
124 fp->tx_pkt_cons = sw_cons;
125 fp->tx_bd_cons = bd_cons;
126
127 /* Need to make the tx_bd_cons update visible to start_xmit()
128 * before checking for netif_tx_queue_stopped(). Without the
129 * memory barrier, there is a small possibility that
130 * start_xmit() will miss it and cause the queue to be stopped
131 * forever.
132 */
133 smp_mb();
134
 135 if (unlikely(netif_tx_queue_stopped(txq))) {
 136 /* Taking tx_lock() is needed to prevent re-enabling the queue
 137 * while it's empty. This could have happened if rx_action() got
 138 * suspended in bnx2x_tx_int() after the condition before
 139 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
140 *
141 * stops the queue->sees fresh tx_bd_cons->releases the queue->
142 * sends some packets consuming the whole queue again->
143 * stops the queue
144 */
145
146 __netif_tx_lock(txq, smp_processor_id());
147
148 if ((netif_tx_queue_stopped(txq)) &&
149 (bp->state == BNX2X_STATE_OPEN) &&
150 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
151 netif_tx_wake_queue(txq);
152
153 __netif_tx_unlock(txq);
154 }
155 return 0;
156}
157
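/* Track the highest SGE index seen so far; SUB_S16() gives a signed 16-bit
 * difference so the comparison survives ring-index wrap-around.
 */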
158static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
159 u16 idx)
160{
161 u16 last_max = fp->last_max_sge;
162
163 if (SUB_S16(idx, last_max) > 0)
164 fp->last_max_sge = idx;
165}
166
167static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
168 struct eth_fast_path_rx_cqe *fp_cqe)
169{
170 struct bnx2x *bp = fp->bp;
171 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
172 le16_to_cpu(fp_cqe->len_on_bd)) >>
173 SGE_PAGE_SHIFT;
174 u16 last_max, last_elem, first_elem;
175 u16 delta = 0;
176 u16 i;
177
178 if (!sge_len)
179 return;
180
181 /* First mark all used pages */
182 for (i = 0; i < sge_len; i++)
 183 SGE_MASK_CLEAR_BIT(fp,
 184 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
 185
 186 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
 187 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 188
 189 /* Here we assume that the last SGE index is the biggest */
 190 prefetch((void *)(fp->sge_mask));
 191 bnx2x_update_last_max_sge(fp,
 192 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
 193
194 last_max = RX_SGE(fp->last_max_sge);
195 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
196 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
197
198 /* If ring is not full */
199 if (last_elem + 1 != first_elem)
200 last_elem++;
201
202 /* Now update the prod */
203 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
204 if (likely(fp->sge_mask[i]))
205 break;
206
207 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
208 delta += RX_SGE_MASK_ELEM_SZ;
209 }
210
211 if (delta > 0) {
212 fp->rx_sge_prod += delta;
213 /* clear page-end entries */
214 bnx2x_clear_sge_mask_next_elems(fp);
215 }
216
217 DP(NETIF_MSG_RX_STATUS,
218 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
219 fp->last_max_sge, fp->rx_sge_prod);
220}
221
222static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
223 struct sk_buff *skb, u16 cons, u16 prod)
224{
225 struct bnx2x *bp = fp->bp;
226 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
227 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
228 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
229 dma_addr_t mapping;
230
231 /* move empty skb from pool to prod and map it */
232 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
233 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
234 bp->rx_buf_size, DMA_FROM_DEVICE);
235 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
236
237 /* move partial skb from cons to pool (don't unmap yet) */
238 fp->tpa_pool[queue] = *cons_rx_buf;
239
240 /* mark bin state as start - print error if current state != stop */
241 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
242 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
243
244 fp->tpa_state[queue] = BNX2X_TPA_START;
245
246 /* point prod_bd to new skb */
247 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
248 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
249
250#ifdef BNX2X_STOP_ON_ERROR
251 fp->tpa_queue_used |= (1 << queue);
252#ifdef _ASM_GENERIC_INT_L64_H
253 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
254#else
255 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
256#endif
257 fp->tpa_queue_used);
258#endif
259}
260
261static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
262 struct sk_buff *skb,
263 struct eth_fast_path_rx_cqe *fp_cqe,
264 u16 cqe_idx)
265{
266 struct sw_rx_page *rx_pg, old_rx_pg;
267 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
268 u32 i, frag_len, frag_size, pages;
269 int err;
270 int j;
271
272 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
273 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
274
275 /* This is needed in order to enable forwarding support */
276 if (frag_size)
277 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
278 max(frag_size, (u32)len_on_bd));
279
280#ifdef BNX2X_STOP_ON_ERROR
281 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
282 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
283 pages, cqe_idx);
284 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
285 fp_cqe->pkt_len, len_on_bd);
286 bnx2x_panic();
287 return -EINVAL;
288 }
289#endif
290
291 /* Run through the SGL and compose the fragmented skb */
292 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
 293 u16 sge_idx =
 294 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
 295
296 /* FW gives the indices of the SGE as if the ring is an array
297 (meaning that "next" element will consume 2 indices) */
298 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
299 rx_pg = &fp->rx_page_ring[sge_idx];
300 old_rx_pg = *rx_pg;
301
302 /* If we fail to allocate a substitute page, we simply stop
303 where we are and drop the whole packet */
304 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
305 if (unlikely(err)) {
306 fp->eth_q_stats.rx_skb_alloc_failed++;
307 return err;
308 }
309
 310 /* Unmap the page as we are going to pass it to the stack */
311 dma_unmap_page(&bp->pdev->dev,
312 dma_unmap_addr(&old_rx_pg, mapping),
313 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
314
315 /* Add one frag and update the appropriate fields in the skb */
316 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
317
318 skb->data_len += frag_len;
319 skb->truesize += frag_len;
320 skb->len += frag_len;
321
322 frag_size -= frag_len;
323 }
324
325 return 0;
326}
327
328static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
329 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
330 u16 cqe_idx)
331{
332 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
333 struct sk_buff *skb = rx_buf->skb;
334 /* alloc new skb */
335 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
336
337 /* Unmap skb in the pool anyway, as we are going to change
338 pool entry status to BNX2X_TPA_STOP even if new skb allocation
339 fails. */
340 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
341 bp->rx_buf_size, DMA_FROM_DEVICE);
342
343 if (likely(new_skb)) {
344 /* fix ip xsum and give it to the stack */
345 /* (no need to map the new skb) */
 346
 347 prefetch(skb);
 348 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 349
350#ifdef BNX2X_STOP_ON_ERROR
351 if (pad + len > bp->rx_buf_size) {
352 BNX2X_ERR("skb_put is about to fail... "
353 "pad %d len %d rx_buf_size %d\n",
354 pad, len, bp->rx_buf_size);
355 bnx2x_panic();
356 return;
357 }
358#endif
359
360 skb_reserve(skb, pad);
361 skb_put(skb, len);
362
363 skb->protocol = eth_type_trans(skb, bp->dev);
364 skb->ip_summed = CHECKSUM_UNNECESSARY;
365
366 {
367 struct iphdr *iph;
368
369 iph = (struct iphdr *)skb->data;
 370 iph->check = 0;
371 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
372 }
373
374 if (!bnx2x_fill_frag_skb(bp, fp, skb,
375 &cqe->fast_path_cqe, cqe_idx)) {
 376 if ((le16_to_cpu(cqe->fast_path_cqe.
 377 pars_flags.flags) & PARSING_FLAGS_VLAN))
 378 __vlan_hwaccel_put_tag(skb,
 379 le16_to_cpu(cqe->fast_path_cqe.
 380 vlan_tag));
 381 napi_gro_receive(&fp->napi, skb);
 382 } else {
383 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
384 " - dropping packet!\n");
385 dev_kfree_skb(skb);
386 }
387
388
389 /* put new skb in bin */
390 fp->tpa_pool[queue].skb = new_skb;
391
392 } else {
393 /* else drop the packet and keep the buffer in the bin */
394 DP(NETIF_MSG_RX_STATUS,
395 "Failed to allocate new skb - dropping packet!\n");
396 fp->eth_q_stats.rx_skb_alloc_failed++;
397 }
398
399 fp->tpa_state[queue] = BNX2X_TPA_STOP;
400}
401
402/* Set Toeplitz hash value in the skb using the value from the
403 * CQE (calculated by HW).
404 */
405static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
406 struct sk_buff *skb)
407{
408 /* Set Toeplitz hash from CQE */
409 if ((bp->dev->features & NETIF_F_RXHASH) &&
410 (cqe->fast_path_cqe.status_flags &
411 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
412 skb->rxhash =
413 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
414}
415
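/* Rx completion: process up to @budget CQEs. Slowpath CQEs are handed to
 * bnx2x_sp_event(), TPA aggregations go through the tpa_start/tpa_stop
 * helpers, and regular packets are passed to the stack via napi_gro_receive().
 */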
416int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
417{
418 struct bnx2x *bp = fp->bp;
419 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
420 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
421 int rx_pkt = 0;
422
423#ifdef BNX2X_STOP_ON_ERROR
424 if (unlikely(bp->panic))
425 return 0;
426#endif
427
428 /* CQ "next element" is of the size of the regular element,
429 that's why it's ok here */
430 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
431 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
432 hw_comp_cons++;
433
434 bd_cons = fp->rx_bd_cons;
435 bd_prod = fp->rx_bd_prod;
436 bd_prod_fw = bd_prod;
437 sw_comp_cons = fp->rx_comp_cons;
438 sw_comp_prod = fp->rx_comp_prod;
439
440 /* Memory barrier necessary as speculative reads of the rx
441 * buffer can be ahead of the index in the status block
442 */
443 rmb();
444
445 DP(NETIF_MSG_RX_STATUS,
446 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
447 fp->index, hw_comp_cons, sw_comp_cons);
448
449 while (sw_comp_cons != hw_comp_cons) {
450 struct sw_rx_bd *rx_buf = NULL;
451 struct sk_buff *skb;
452 union eth_rx_cqe *cqe;
453 u8 cqe_fp_flags;
454 u16 len, pad;
455
456 comp_ring_cons = RCQ_BD(sw_comp_cons);
457 bd_prod = RX_BD(bd_prod);
458 bd_cons = RX_BD(bd_cons);
459
460 /* Prefetch the page containing the BD descriptor
461 at producer's index. It will be needed when new skb is
462 allocated */
463 prefetch((void *)(PAGE_ALIGN((unsigned long)
464 (&fp->rx_desc_ring[bd_prod])) -
465 PAGE_SIZE + 1));
466
467 cqe = &fp->rx_comp_ring[comp_ring_cons];
468 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
469
470 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
471 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
472 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
473 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
474 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
475 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
476
477 /* is this a slowpath msg? */
478 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
479 bnx2x_sp_event(fp, cqe);
480 goto next_cqe;
481
482 /* this is an rx packet */
483 } else {
484 rx_buf = &fp->rx_buf_ring[bd_cons];
485 skb = rx_buf->skb;
486 prefetch(skb);
487 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
488 pad = cqe->fast_path_cqe.placement_offset;
489
 490 /* - If a CQE is marked both TPA_START and TPA_END it is
 491 * a non-TPA CQE.
 492 * - An FP CQE will always have either the TPA_START and/or
 493 * TPA_STOP flag set.
 494 */
 495 if ((!fp->disable_tpa) &&
496 (TPA_TYPE(cqe_fp_flags) !=
497 (TPA_TYPE_START | TPA_TYPE_END))) {
498 u16 queue = cqe->fast_path_cqe.queue_index;
499
500 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
501 DP(NETIF_MSG_RX_STATUS,
502 "calling tpa_start on queue %d\n",
503 queue);
504
505 bnx2x_tpa_start(fp, queue, skb,
506 bd_cons, bd_prod);
507
508 /* Set Toeplitz hash for an LRO skb */
509 bnx2x_set_skb_rxhash(bp, cqe, skb);
510
511 goto next_rx;
 512 } else { /* TPA_STOP */
 513 DP(NETIF_MSG_RX_STATUS,
 514 "calling tpa_stop on queue %d\n",
 515 queue);
 516
 517 if (!BNX2X_RX_SUM_FIX(cqe))
 518 BNX2X_ERR("STOP on non-TCP "
 519 "data\n");
520
521 /* This is a size of the linear data
522 on this skb */
523 len = le16_to_cpu(cqe->fast_path_cqe.
524 len_on_bd);
525 bnx2x_tpa_stop(bp, fp, queue, pad,
526 len, cqe, comp_ring_cons);
527#ifdef BNX2X_STOP_ON_ERROR
528 if (bp->panic)
529 return 0;
530#endif
531
532 bnx2x_update_sge_prod(fp,
533 &cqe->fast_path_cqe);
534 goto next_cqe;
535 }
536 }
537
538 dma_sync_single_for_device(&bp->pdev->dev,
539 dma_unmap_addr(rx_buf, mapping),
540 pad + RX_COPY_THRESH,
541 DMA_FROM_DEVICE);
 542 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
 543
544 /* is this an error packet? */
545 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
546 DP(NETIF_MSG_RX_ERR,
547 "ERROR flags %x rx packet %u\n",
548 cqe_fp_flags, sw_comp_cons);
549 fp->eth_q_stats.rx_err_discard_pkt++;
550 goto reuse_rx;
551 }
552
553 /* Since we don't have a jumbo ring
554 * copy small packets if mtu > 1500
555 */
556 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
557 (len <= RX_COPY_THRESH)) {
558 struct sk_buff *new_skb;
559
560 new_skb = netdev_alloc_skb(bp->dev,
561 len + pad);
562 if (new_skb == NULL) {
563 DP(NETIF_MSG_RX_ERR,
564 "ERROR packet dropped "
565 "because of alloc failure\n");
566 fp->eth_q_stats.rx_skb_alloc_failed++;
567 goto reuse_rx;
568 }
569
570 /* aligned copy */
571 skb_copy_from_linear_data_offset(skb, pad,
572 new_skb->data + pad, len);
573 skb_reserve(new_skb, pad);
574 skb_put(new_skb, len);
575
 576 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 577
578 skb = new_skb;
579
580 } else
581 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
582 dma_unmap_single(&bp->pdev->dev,
583 dma_unmap_addr(rx_buf, mapping),
584 bp->rx_buf_size,
585 DMA_FROM_DEVICE);
586 skb_reserve(skb, pad);
587 skb_put(skb, len);
588
589 } else {
590 DP(NETIF_MSG_RX_ERR,
591 "ERROR packet dropped because "
592 "of alloc failure\n");
593 fp->eth_q_stats.rx_skb_alloc_failed++;
594reuse_rx:
 595 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
 596 goto next_rx;
597 }
598
599 skb->protocol = eth_type_trans(skb, bp->dev);
600
 601 /* Set Toeplitz hash for a non-LRO skb */
602 bnx2x_set_skb_rxhash(bp, cqe, skb);
603
 604 skb_checksum_none_assert(skb);
 605
 606 if (bp->rx_csum) {
607 if (likely(BNX2X_RX_CSUM_OK(cqe)))
608 skb->ip_summed = CHECKSUM_UNNECESSARY;
609 else
610 fp->eth_q_stats.hw_csum_err++;
611 }
612 }
613
614 skb_record_rx_queue(skb, fp->index);
615
 616 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
 617 PARSING_FLAGS_VLAN)
 618 __vlan_hwaccel_put_tag(skb,
 619 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
 620 napi_gro_receive(&fp->napi, skb);
 621
622
623next_rx:
624 rx_buf->skb = NULL;
625
626 bd_cons = NEXT_RX_IDX(bd_cons);
627 bd_prod = NEXT_RX_IDX(bd_prod);
628 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
629 rx_pkt++;
630next_cqe:
631 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
632 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
633
634 if (rx_pkt == budget)
635 break;
636 } /* while */
637
638 fp->rx_bd_cons = bd_cons;
639 fp->rx_bd_prod = bd_prod_fw;
640 fp->rx_comp_cons = sw_comp_cons;
641 fp->rx_comp_prod = sw_comp_prod;
642
643 /* Update producers */
644 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
645 fp->rx_sge_prod);
646
647 fp->rx_pkt += rx_pkt;
648 fp->rx_calls++;
649
650 return rx_pkt;
651}
652
653static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
654{
655 struct bnx2x_fastpath *fp = fp_cookie;
656 struct bnx2x *bp = fp->bp;
657
658 /* Return here if interrupt is disabled */
659 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
660 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
661 return IRQ_HANDLED;
662 }
663
 664 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
 665 "[fp %d fw_sd %d igusb %d]\n",
 666 fp->index, fp->fw_sb_id, fp->igu_sb_id);
 667 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
 668
669#ifdef BNX2X_STOP_ON_ERROR
670 if (unlikely(bp->panic))
671 return IRQ_HANDLED;
672#endif
673
674 /* Handle Rx and Tx according to MSI-X vector */
675 prefetch(fp->rx_cons_sb);
676 prefetch(fp->tx_cons_sb);
 677 prefetch(&fp->sb_running_index[SM_RX_ID]);
 678 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
679
680 return IRQ_HANDLED;
681}
682
 683/* HW Lock for shared dual port PHYs */
684void bnx2x_acquire_phy_lock(struct bnx2x *bp)
685{
686 mutex_lock(&bp->port.phy_mutex);
687
688 if (bp->port.need_hw_lock)
689 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
690}
691
692void bnx2x_release_phy_lock(struct bnx2x *bp)
693{
694 if (bp->port.need_hw_lock)
695 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
696
697 mutex_unlock(&bp->port.phy_mutex);
698}
699
700void bnx2x_link_report(struct bnx2x *bp)
701{
702 if (bp->flags & MF_FUNC_DIS) {
703 netif_carrier_off(bp->dev);
704 netdev_err(bp->dev, "NIC Link is Down\n");
705 return;
706 }
707
708 if (bp->link_vars.link_up) {
709 u16 line_speed;
710
711 if (bp->state == BNX2X_STATE_OPEN)
712 netif_carrier_on(bp->dev);
713 netdev_info(bp->dev, "NIC Link is Up, ");
714
715 line_speed = bp->link_vars.line_speed;
 716 if (IS_MF(bp)) {
 717 u16 vn_max_rate;
 718
 719 vn_max_rate =
 720 ((bp->mf_config[BP_VN(bp)] &
 721 FUNC_MF_CFG_MAX_BW_MASK) >>
 722 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
 723 if (vn_max_rate < line_speed)
724 line_speed = vn_max_rate;
725 }
726 pr_cont("%d Mbps ", line_speed);
727
728 if (bp->link_vars.duplex == DUPLEX_FULL)
729 pr_cont("full duplex");
730 else
731 pr_cont("half duplex");
732
733 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
734 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
735 pr_cont(", receive ");
736 if (bp->link_vars.flow_ctrl &
737 BNX2X_FLOW_CTRL_TX)
738 pr_cont("& transmit ");
739 } else {
740 pr_cont(", transmit ");
741 }
742 pr_cont("flow control ON");
743 }
744 pr_cont("\n");
745
746 } else { /* link_down */
747 netif_carrier_off(bp->dev);
748 netdev_err(bp->dev, "NIC Link is Down\n");
749 }
750}
751
 752/* Returns the number of actually allocated BDs */
 753static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
 754 int rx_ring_size)
 755{
 756 struct bnx2x *bp = fp->bp;
 757 u16 ring_prod, cqe_ring_prod;
 758 int i;
759
760 fp->rx_comp_cons = 0;
761 cqe_ring_prod = ring_prod = 0;
762 for (i = 0; i < rx_ring_size; i++) {
763 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
764 BNX2X_ERR("was only able to allocate "
765 "%d rx skbs on queue[%d]\n", i, fp->index);
766 fp->eth_q_stats.rx_skb_alloc_failed++;
767 break;
768 }
769 ring_prod = NEXT_RX_IDX(ring_prod);
770 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
771 WARN_ON(ring_prod <= i);
772 }
773
774 fp->rx_bd_prod = ring_prod;
775 /* Limit the CQE producer by the CQE ring size */
776 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
777 cqe_ring_prod);
778 fp->rx_pkt = fp->rx_calls = 0;
779
780 return i;
781}
782
783static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
784{
785 struct bnx2x *bp = fp->bp;
 786 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
 787 MAX_RX_AVAIL/bp->num_queues;
 788
 789 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
 790
 791 bnx2x_alloc_rx_bds(fp, rx_ring_size);
792
793 /* Warning!
794 * this will generate an interrupt (to the TSTORM)
795 * must only be done after chip is initialized
796 */
797 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
798 fp->rx_sge_prod);
799}
800
801void bnx2x_init_rx_rings(struct bnx2x *bp)
802{
803 int func = BP_FUNC(bp);
804 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
805 ETH_MAX_AGGREGATION_QUEUES_E1H;
806 u16 ring_prod;
807 int i, j;
808
809 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
 810 IP_HEADER_ALIGNMENT_PADDING;
 811
 812 DP(NETIF_MSG_IFUP,
813 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
814
 815 for_each_queue(bp, j) {
 816 struct bnx2x_fastpath *fp = &bp->fp[j];
 817
 818 if (!fp->disable_tpa) {
 819 for (i = 0; i < max_agg_queues; i++) {
820 fp->tpa_pool[i].skb =
821 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
822 if (!fp->tpa_pool[i].skb) {
823 BNX2X_ERR("Failed to allocate TPA "
824 "skb pool for queue[%d] - "
825 "disabling TPA on this "
826 "queue!\n", j);
827 bnx2x_free_tpa_pool(bp, fp, i);
828 fp->disable_tpa = 1;
829 break;
830 }
831 dma_unmap_addr_set((struct sw_rx_bd *)
832 &bp->fp->tpa_pool[i],
833 mapping, 0);
834 fp->tpa_state[i] = BNX2X_TPA_STOP;
835 }
 836
837 /* "next page" elements initialization */
838 bnx2x_set_next_page_sgl(fp);
839
840 /* set SGEs bit mask */
841 bnx2x_init_sge_ring_bit_mask(fp);
842
843 /* Allocate SGEs and initialize the ring elements */
844 for (i = 0, ring_prod = 0;
845 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
846
847 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
848 BNX2X_ERR("was only able to allocate "
849 "%d rx sges\n", i);
850 BNX2X_ERR("disabling TPA for"
851 " queue[%d]\n", j);
852 /* Cleanup already allocated elements */
853 bnx2x_free_rx_sge_range(bp,
854 fp, ring_prod);
855 bnx2x_free_tpa_pool(bp,
856 fp, max_agg_queues);
857 fp->disable_tpa = 1;
858 ring_prod = 0;
859 break;
860 }
861 ring_prod = NEXT_SGE_IDX(ring_prod);
862 }
863
864 fp->rx_sge_prod = ring_prod;
 865 }
866 }
867
868 for_each_queue(bp, j) {
869 struct bnx2x_fastpath *fp = &bp->fp[j];
870
871 fp->rx_bd_cons = 0;
 872
 873 bnx2x_set_next_page_rx_bd(fp);
 874
 875 /* CQ ring */
 876 bnx2x_set_next_page_rx_cq(fp);
 877
 878 /* Allocate BDs and initialize BD ring */
 879 bnx2x_alloc_rx_bd_ring(fp);
 880
 881 if (j != 0)
882 continue;
883
 884 if (!CHIP_IS_E2(bp)) {
885 REG_WR(bp, BAR_USTRORM_INTMEM +
886 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
887 U64_LO(fp->rx_comp_mapping));
888 REG_WR(bp, BAR_USTRORM_INTMEM +
889 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
890 U64_HI(fp->rx_comp_mapping));
891 }
 892 }
 893}
 894
 895static void bnx2x_free_tx_skbs(struct bnx2x *bp)
896{
897 int i;
898
899 for_each_queue(bp, i) {
900 struct bnx2x_fastpath *fp = &bp->fp[i];
901
902 u16 bd_cons = fp->tx_bd_cons;
903 u16 sw_prod = fp->tx_pkt_prod;
904 u16 sw_cons = fp->tx_pkt_cons;
905
906 while (sw_cons != sw_prod) {
907 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
908 sw_cons++;
909 }
910 }
911}
912
913static void bnx2x_free_rx_skbs(struct bnx2x *bp)
914{
915 int i, j;
916
917 for_each_queue(bp, j) {
918 struct bnx2x_fastpath *fp = &bp->fp[j];
919
920 for (i = 0; i < NUM_RX_BD; i++) {
921 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
922 struct sk_buff *skb = rx_buf->skb;
923
924 if (skb == NULL)
925 continue;
926
927 dma_unmap_single(&bp->pdev->dev,
928 dma_unmap_addr(rx_buf, mapping),
929 bp->rx_buf_size, DMA_FROM_DEVICE);
930
931 rx_buf->skb = NULL;
932 dev_kfree_skb(skb);
933 }
934 if (!fp->disable_tpa)
935 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
936 ETH_MAX_AGGREGATION_QUEUES_E1 :
937 ETH_MAX_AGGREGATION_QUEUES_E1H);
938 }
939}
940
941void bnx2x_free_skbs(struct bnx2x *bp)
942{
943 bnx2x_free_tx_skbs(bp);
944 bnx2x_free_rx_skbs(bp);
945}
946
947static void bnx2x_free_msix_irqs(struct bnx2x *bp)
948{
949 int i, offset = 1;
950
951 free_irq(bp->msix_table[0].vector, bp->dev);
952 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
953 bp->msix_table[0].vector);
954
955#ifdef BCM_CNIC
956 offset++;
957#endif
958 for_each_queue(bp, i) {
959 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
960 "state %x\n", i, bp->msix_table[i + offset].vector,
961 bnx2x_fp(bp, i, state));
962
963 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
964 }
965}
966
 967void bnx2x_free_irq(struct bnx2x *bp)
 968{
 969 if (bp->flags & USING_MSIX_FLAG)
 970 bnx2x_free_msix_irqs(bp);
 971 else if (bp->flags & USING_MSI_FLAG)
 972 free_irq(bp->pdev->irq, bp->dev);
 973 else
 974 free_irq(bp->pdev->irq, bp->dev);
975}
976
 977int bnx2x_enable_msix(struct bnx2x *bp)
 978{
 979 int msix_vec = 0, i, rc, req_cnt;
 980
 981 bp->msix_table[msix_vec].entry = msix_vec;
982 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
983 bp->msix_table[0].entry);
984 msix_vec++;
 985
 986#ifdef BCM_CNIC
 987 bp->msix_table[msix_vec].entry = msix_vec;
988 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
989 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
990 msix_vec++;
 991#endif
 992 for_each_queue(bp, i) {
 993 bp->msix_table[msix_vec].entry = msix_vec;
 994 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
 995 "(fastpath #%u)\n", msix_vec, msix_vec, i);
 996 msix_vec++;
 997 }
998
 999 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
 1000
 1001 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
 1002
1003 /*
1004 * reconfigure number of tx/rx queues according to available
1005 * MSI-X vectors
1006 */
1007 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
 1008 /* how many fewer vectors will we have? */
 1009 int diff = req_cnt - rc;
 1010
1011 DP(NETIF_MSG_IFUP,
1012 "Trying to use less MSI-X vectors: %d\n", rc);
1013
1014 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1015
1016 if (rc) {
1017 DP(NETIF_MSG_IFUP,
1018 "MSI-X is not attainable rc %d\n", rc);
1019 return rc;
1020 }
 1021 /*
 1022 * decrease number of queues by number of unallocated entries
 1023 */
 1024 bp->num_queues -= diff;
 1025
1026 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1027 bp->num_queues);
1028 } else if (rc) {
 1029 /* fall back to INTx if not enough memory */
 1030 if (rc == -ENOMEM)
 1031 bp->flags |= DISABLE_MSI_FLAG;
 1032 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1033 return rc;
1034 }
1035
1036 bp->flags |= USING_MSIX_FLAG;
1037
1038 return 0;
1039}
1040
1041static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1042{
1043 int i, rc, offset = 1;
1044
1045 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1046 bp->dev->name, bp->dev);
1047 if (rc) {
1048 BNX2X_ERR("request sp irq failed\n");
1049 return -EBUSY;
1050 }
1051
1052#ifdef BCM_CNIC
1053 offset++;
1054#endif
1055 for_each_queue(bp, i) {
1056 struct bnx2x_fastpath *fp = &bp->fp[i];
1057 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1058 bp->dev->name, i);
1059
 1060 rc = request_irq(bp->msix_table[offset].vector,
 1061 bnx2x_msix_fp_int, 0, fp->name, fp);
1062 if (rc) {
1063 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1064 bnx2x_free_msix_irqs(bp);
1065 return -EBUSY;
1066 }
1067
 1068 offset++;
 1069 fp->state = BNX2X_FP_STATE_IRQ;
1070 }
1071
1072 i = BNX2X_NUM_QUEUES(bp);
 1073 offset = 1 + CNIC_CONTEXT_USE;
 1074 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1075 " ... fp[%d] %d\n",
1076 bp->msix_table[0].vector,
1077 0, bp->msix_table[offset].vector,
1078 i - 1, bp->msix_table[offset + i - 1].vector);
1079
1080 return 0;
1081}
1082
 1083int bnx2x_enable_msi(struct bnx2x *bp)
 1084{
1085 int rc;
1086
1087 rc = pci_enable_msi(bp->pdev);
1088 if (rc) {
1089 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1090 return -1;
1091 }
1092 bp->flags |= USING_MSI_FLAG;
1093
1094 return 0;
1095}
1096
1097static int bnx2x_req_irq(struct bnx2x *bp)
1098{
1099 unsigned long flags;
1100 int rc;
1101
1102 if (bp->flags & USING_MSI_FLAG)
1103 flags = 0;
1104 else
1105 flags = IRQF_SHARED;
1106
1107 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1108 bp->dev->name, bp->dev);
1109 if (!rc)
1110 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1111
1112 return rc;
1113}
1114
1115static void bnx2x_napi_enable(struct bnx2x *bp)
1116{
1117 int i;
1118
1119 for_each_queue(bp, i)
1120 napi_enable(&bnx2x_fp(bp, i, napi));
1121}
1122
1123static void bnx2x_napi_disable(struct bnx2x *bp)
1124{
1125 int i;
1126
1127 for_each_queue(bp, i)
1128 napi_disable(&bnx2x_fp(bp, i, napi));
1129}
1130
1131void bnx2x_netif_start(struct bnx2x *bp)
1132{
1133 int intr_sem;
1134
1135 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1136 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1137
1138 if (intr_sem) {
1139 if (netif_running(bp->dev)) {
1140 bnx2x_napi_enable(bp);
1141 bnx2x_int_enable(bp);
1142 if (bp->state == BNX2X_STATE_OPEN)
1143 netif_tx_wake_all_queues(bp->dev);
1144 }
1145 }
1146}
1147
1148void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1149{
1150 bnx2x_int_disable_sync(bp, disable_hw);
1151 bnx2x_napi_disable(bp);
1152 netif_tx_disable(bp->dev);
1153}
 1154
 1155void bnx2x_set_num_queues(struct bnx2x *bp)
 1156{
 1157 switch (bp->multi_mode) {
 1158 case ETH_RSS_MODE_DISABLED:
 1159 bp->num_queues = 1;
 1160 break;
 1161 case ETH_RSS_MODE_REGULAR:
 1162 bp->num_queues = bnx2x_calc_num_queues(bp);
 1163 break;
 1164
 1165 default:
 1166 bp->num_queues = 1;
 1167 break;
 1168 }
 1169}
1170
 1171static void bnx2x_release_firmware(struct bnx2x *bp)
1172{
1173 kfree(bp->init_ops_offsets);
1174 kfree(bp->init_ops);
1175 kfree(bp->init_data);
1176 release_firmware(bp->firmware);
1177}
1178
 1179/* must be called with rtnl_lock */
1180int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1181{
1182 u32 load_code;
1183 int i, rc;
1184
 1185 /* Set init arrays */
1186 rc = bnx2x_init_firmware(bp);
1187 if (rc) {
1188 BNX2X_ERR("Error loading firmware\n");
1189 return rc;
1190 }
1191
 1192#ifdef BNX2X_STOP_ON_ERROR
1193 if (unlikely(bp->panic))
1194 return -EPERM;
1195#endif
1196
1197 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1198
 1199 /* must be called before memory allocation and HW init */
 1200 bnx2x_ilt_set_info(bp);
 1201
 1202 if (bnx2x_alloc_mem(bp))
 1203 return -ENOMEM;
 1204
1205 netif_set_real_num_tx_queues(bp->dev, bp->num_queues);
1206 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1207 if (rc) {
1208 BNX2X_ERR("Unable to update real_num_rx_queues\n");
1209 goto load_error0;
 1210 }
1211
1212 for_each_queue(bp, i)
1213 bnx2x_fp(bp, i, disable_tpa) =
1214 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1215
 1216 bnx2x_napi_enable(bp);
1217
 1218 /* Send LOAD_REQUEST command to MCP
1219 Returns the type of LOAD command:
1220 if it is the first port to be initialized
1221 common blocks should be initialized, otherwise - not
1222 */
1223 if (!BP_NOMCP(bp)) {
 1224 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
 1225 if (!load_code) {
 1226 BNX2X_ERR("MCP response failure, aborting\n");
 1227 rc = -EBUSY;
 1228 goto load_error1;
 1229 }
 1230 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
 1231 rc = -EBUSY; /* other port in diagnostic mode */
 1232 goto load_error1;
 1233 }
 1234
1234
1235 } else {
 1236 int path = BP_PATH(bp);
 1237 int port = BP_PORT(bp);
 1238
 1239 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
 1240 path, load_count[path][0], load_count[path][1],
 1241 load_count[path][2]);
 1242 load_count[path][0]++;
 1243 load_count[path][1 + port]++;
 1244 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
 1245 path, load_count[path][0], load_count[path][1],
 1246 load_count[path][2]);
 1247 if (load_count[path][0] == 1)
 1248 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
 1249 else if (load_count[path][1 + port] == 1)
 1250 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1251 else
1252 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1253 }
1254
1255 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 1256 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
 1257 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1258 bp->port.pmf = 1;
1259 else
1260 bp->port.pmf = 0;
1261 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1262
1263 /* Initialize HW */
1264 rc = bnx2x_init_hw(bp, load_code);
1265 if (rc) {
1266 BNX2X_ERR("HW init failed, aborting\n");
 1267 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 1268 goto load_error2;
1269 }
1270
 1271 /* Connect to IRQs */
 1272 rc = bnx2x_setup_irqs(bp);
 1273 if (rc) {
1274 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1275 goto load_error2;
1276 }
1277
 1278 /* Setup NIC internals and enable interrupts */
1279 bnx2x_nic_init(bp, load_code);
1280
 1281 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
 1282 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
 1283 (bp->common.shmem2_base))
1284 SHMEM2_WR(bp, dcc_support,
1285 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1286 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1287
1288 /* Send LOAD_DONE command to MCP */
1289 if (!BP_NOMCP(bp)) {
 1290 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
 1291 if (!load_code) {
1292 BNX2X_ERR("MCP response failure, aborting\n");
1293 rc = -EBUSY;
1294 goto load_error3;
1295 }
1296 }
1297
1298 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1299
 1300 rc = bnx2x_func_start(bp);
1301 if (rc) {
1302 BNX2X_ERR("Function start failed!\n");
1303#ifndef BNX2X_STOP_ON_ERROR
1304 goto load_error3;
1305#else
1306 bp->panic = 1;
1307 return -EBUSY;
1308#endif
1309 }
1310
1311 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
 1312 if (rc) {
1313 BNX2X_ERR("Setup leading failed!\n");
1314#ifndef BNX2X_STOP_ON_ERROR
1315 goto load_error3;
1316#else
1317 bp->panic = 1;
1318 return -EBUSY;
1319#endif
1320 }
1321
 1322 if (!CHIP_IS_E1(bp) &&
1323 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1324 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1325 bp->flags |= MF_FUNC_DIS;
1326 }
 1327
 1328#ifdef BCM_CNIC
 1329 /* Enable Timer scan */
 1330 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
 1331#endif
 1332
 1333 for_each_nondefault_queue(bp, i) {
1334 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1335 if (rc)
 1336#ifdef BCM_CNIC
 1337 goto load_error4;
 1338#else
 1339 goto load_error3;
 1340#endif
1341 }
1342
 1343 /* Now that clients are configured we are ready to work */
1344 bp->state = BNX2X_STATE_OPEN;
1345
1346 bnx2x_set_eth_mac(bp, 1);
1347
 1348 if (bp->port.pmf)
1349 bnx2x_initial_phy_init(bp, load_mode);
1350
1351 /* Start fast path */
1352 switch (load_mode) {
1353 case LOAD_NORMAL:
 1354 /* Tx queues need only be re-enabled */
 1355 netif_tx_wake_all_queues(bp->dev);
 1356 /* Initialize the receive filter. */
1357 bnx2x_set_rx_mode(bp->dev);
1358 break;
1359
1360 case LOAD_OPEN:
1361 netif_tx_start_all_queues(bp->dev);
 1362 smp_mb__after_clear_bit();
 1363 /* Initialize the receive filter. */
1364 bnx2x_set_rx_mode(bp->dev);
1365 break;
1366
1367 case LOAD_DIAG:
1368 /* Initialize the receive filter. */
1369 bnx2x_set_rx_mode(bp->dev);
1370 bp->state = BNX2X_STATE_DIAG;
1371 break;
1372
1373 default:
1374 break;
1375 }
1376
1377 if (!bp->port.pmf)
1378 bnx2x__link_status_update(bp);
1379
1380 /* start the timer */
1381 mod_timer(&bp->timer, jiffies + bp->current_interval);
1382
1383#ifdef BCM_CNIC
1384 bnx2x_setup_cnic_irq_info(bp);
1385 if (bp->state == BNX2X_STATE_OPEN)
1386 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1387#endif
1388 bnx2x_inc_load_cnt(bp);
1389
 1390 bnx2x_release_firmware(bp);
 1391
 1392 return 0;
1393
1394#ifdef BCM_CNIC
1395load_error4:
1396 /* Disable Timer scan */
1397 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1398#endif
1399load_error3:
1400 bnx2x_int_disable_sync(bp, 1);
 1401
 1402 /* Free SKBs, SGEs, TPA pool and driver internals */
 1403 bnx2x_free_skbs(bp);
 1404 for_each_queue(bp, i)
 1405 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 1406
 1407 /* Release IRQs */
 1408 bnx2x_free_irq(bp);
1409load_error2:
1410 if (!BP_NOMCP(bp)) {
1411 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1412 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1413 }
1414
1415 bp->port.pmf = 0;
 1416load_error1:
 1417 bnx2x_napi_disable(bp);
 1418load_error0:
 1419 bnx2x_free_mem(bp);
 1420
 1421 bnx2x_release_firmware(bp);
1422
 1423 return rc;
1424}
1425
1426/* must be called with rtnl_lock */
1427int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1428{
1429 int i;
1430
1431 if (bp->state == BNX2X_STATE_CLOSED) {
1432 /* Interface has been removed - nothing to recover */
1433 bp->recovery_state = BNX2X_RECOVERY_DONE;
1434 bp->is_leader = 0;
1435 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1436 smp_wmb();
1437
1438 return -EINVAL;
1439 }
1440
1441#ifdef BCM_CNIC
1442 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1443#endif
1444 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1445
1446 /* Set "drop all" */
1447 bp->rx_mode = BNX2X_RX_MODE_NONE;
1448 bnx2x_set_storm_rx_mode(bp);
1449
 1450 /* Stop Tx */
 1451 bnx2x_tx_disable(bp);
 1452
 1453 del_timer_sync(&bp->timer);
 1454
 1455 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
 1456 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
 1457
 1458 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 1459
1460 /* Cleanup the chip if needed */
1461 if (unload_mode != UNLOAD_RECOVERY)
1462 bnx2x_chip_cleanup(bp, unload_mode);
 1463 else {
 1464 /* Disable HW interrupts, NAPI and Tx */
 1465 bnx2x_netif_stop(bp, 1);
 1466
 1467 /* Release IRQs */
 1468 bnx2x_free_irq(bp);
 1469 }
 1470
1471 bp->port.pmf = 0;
1472
1473 /* Free SKBs, SGEs, TPA pool and driver internals */
1474 bnx2x_free_skbs(bp);
1475 for_each_queue(bp, i)
1476 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
 1477
 1478 bnx2x_free_mem(bp);
1479
1480 bp->state = BNX2X_STATE_CLOSED;
1481
1482 /* The last driver must disable a "close the gate" if there is no
1483 * parity attention or "process kill" pending.
1484 */
1485 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1486 bnx2x_reset_is_done(bp))
1487 bnx2x_disable_close_the_gate(bp);
1488
 1489 /* Reset MCP mailbox sequence if there is an ongoing recovery */
1490 if (unload_mode == UNLOAD_RECOVERY)
1491 bp->fw_seq = 0;
1492
1493 return 0;
1494}
 1495
 1496int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1497{
1498 u16 pmcsr;
1499
 1500 /* If there is no power capability, silently succeed */
1501 if (!bp->pm_cap) {
1502 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1503 return 0;
1504 }
1505
 1506 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1507
1508 switch (state) {
1509 case PCI_D0:
1510 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1511 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1512 PCI_PM_CTRL_PME_STATUS));
1513
1514 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1515 /* delay required during transition out of D3hot */
1516 msleep(20);
1517 break;
1518
1519 case PCI_D3hot:
1520 /* If there are other clients above don't
1521 shut down the power */
1522 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1523 return 0;
1524 /* Don't shut down the power for emulation and FPGA */
1525 if (CHIP_REV_IS_SLOW(bp))
1526 return 0;
1527
1528 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1529 pmcsr |= 3;
1530
1531 if (bp->wol)
1532 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1533
1534 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1535 pmcsr);
1536
1537 /* No more memory access after this point until
1538 * device is brought back to D0.
1539 */
1540 break;
1541
1542 default:
1543 return -EINVAL;
1544 }
1545 return 0;
1546}
1547
 1548/*
 1549 * net_device service functions
 1550 */
 1551int bnx2x_poll(struct napi_struct *napi, int budget)
 1552{
1553 int work_done = 0;
1554 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1555 napi);
1556 struct bnx2x *bp = fp->bp;
1557
1558 while (1) {
1559#ifdef BNX2X_STOP_ON_ERROR
1560 if (unlikely(bp->panic)) {
1561 napi_complete(napi);
1562 return 0;
1563 }
1564#endif
1565
1566 if (bnx2x_has_tx_work(fp))
1567 bnx2x_tx_int(fp);
1568
1569 if (bnx2x_has_rx_work(fp)) {
1570 work_done += bnx2x_rx_int(fp, budget - work_done);
1571
1572 /* must not complete if we consumed full budget */
1573 if (work_done >= budget)
1574 break;
1575 }
1576
1577 /* Fall out from the NAPI loop if needed */
1578 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1579 bnx2x_update_fpsb_idx(fp);
 1580 /* bnx2x_has_rx_work() reads the status block,
1581 * thus we need to ensure that status block indices
1582 * have been actually read (bnx2x_update_fpsb_idx)
1583 * prior to this check (bnx2x_has_rx_work) so that
1584 * we won't write the "newer" value of the status block
1585 * to IGU (if there was a DMA right after
1586 * bnx2x_has_rx_work and if there is no rmb, the memory
1587 * reading (bnx2x_update_fpsb_idx) may be postponed
1588 * to right before bnx2x_ack_sb). In this case there
1589 * will never be another interrupt until there is
1590 * another update of the status block, while there
1591 * is still unhandled work.
1592 */
 1593 rmb();
1594
1595 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1596 napi_complete(napi);
1597 /* Re-enable interrupts */
 1598 DP(NETIF_MSG_HW,
 1599 "Update index to %d\n", fp->fp_hc_idx);
 1600 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
 1601 le16_to_cpu(fp->fp_hc_idx),
 1602 IGU_INT_ENABLE, 1);
1603 break;
1604 }
1605 }
1606 }
1607
1608 return work_done;
1609}
1610
 1611/* we split the first BD into headers and data BDs
1612 * to ease the pain of our fellow microcode engineers
1613 * we use one mapping for both BDs
1614 * So far this has only been observed to happen
1615 * in Other Operating Systems(TM)
1616 */
1617static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1618 struct bnx2x_fastpath *fp,
1619 struct sw_tx_bd *tx_buf,
1620 struct eth_tx_start_bd **tx_bd, u16 hlen,
1621 u16 bd_prod, int nbd)
1622{
1623 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1624 struct eth_tx_bd *d_tx_bd;
1625 dma_addr_t mapping;
1626 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1627
1628 /* first fix first BD */
1629 h_tx_bd->nbd = cpu_to_le16(nbd);
1630 h_tx_bd->nbytes = cpu_to_le16(hlen);
1631
1632 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1633 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1634 h_tx_bd->addr_lo, h_tx_bd->nbd);
1635
1636 /* now get a new data BD
1637 * (after the pbd) and fill it */
1638 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1639 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1640
1641 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1642 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1643
1644 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1645 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1646 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1647
1648 /* this marks the BD as one that has no individual mapping */
1649 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1650
1651 DP(NETIF_MSG_TX_QUEUED,
1652 "TSO split data size is %d (%x:%x)\n",
1653 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1654
1655 /* update tx_bd */
1656 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1657
1658 return bd_prod;
1659}
1660
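/* Adjust @csum by folding out the checksum of the 'fix' bytes that precede
 * @t_header (fix > 0) or folding in the first -fix bytes at @t_header
 * (fix < 0); the result is returned byte-swapped for the parsing BD.
 */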
1661static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1662{
1663 if (fix > 0)
1664 csum = (u16) ~csum_fold(csum_sub(csum,
1665 csum_partial(t_header - fix, fix, 0)));
1666
1667 else if (fix < 0)
1668 csum = (u16) ~csum_fold(csum_add(csum,
1669 csum_partial(t_header, -fix, 0)));
1670
1671 return swab16(csum);
1672}
1673
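/* Map the skb checksum/GSO state to the XMIT_* flags (XMIT_PLAIN,
 * XMIT_CSUM_V4/V6, XMIT_CSUM_TCP, XMIT_GSO_V4/V6) that the transmit path
 * uses to program checksum and LSO offload in the BDs.
 */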
1674static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1675{
1676 u32 rc;
1677
1678 if (skb->ip_summed != CHECKSUM_PARTIAL)
1679 rc = XMIT_PLAIN;
1680
1681 else {
1682 if (skb->protocol == htons(ETH_P_IPV6)) {
1683 rc = XMIT_CSUM_V6;
1684 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1685 rc |= XMIT_CSUM_TCP;
1686
1687 } else {
1688 rc = XMIT_CSUM_V4;
1689 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1690 rc |= XMIT_CSUM_TCP;
1691 }
1692 }
1693
1694 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
1695 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
1696
1697 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
1698 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1699
1700 return rc;
1701}
1702
1703#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
 1704/* check if the packet requires linearization (packet is too fragmented);
 1705 no need to check fragmentation if page size > 8K (there will be no
 1706 violation of FW restrictions) */
1707static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1708 u32 xmit_type)
1709{
1710 int to_copy = 0;
1711 int hlen = 0;
1712 int first_bd_sz = 0;
1713
1714 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1715 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1716
1717 if (xmit_type & XMIT_GSO) {
1718 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1719 /* Check if LSO packet needs to be copied:
1720 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1721 int wnd_size = MAX_FETCH_BD - 3;
1722 /* Number of windows to check */
1723 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1724 int wnd_idx = 0;
1725 int frag_idx = 0;
1726 u32 wnd_sum = 0;
1727
1728 /* Headers length */
1729 hlen = (int)(skb_transport_header(skb) - skb->data) +
1730 tcp_hdrlen(skb);
1731
1732			/* Amount of data (w/o headers) in the linear part of the SKB */
1733 first_bd_sz = skb_headlen(skb) - hlen;
1734
1735 wnd_sum = first_bd_sz;
1736
1737 /* Calculate the first sum - it's special */
1738 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
1739 wnd_sum +=
1740 skb_shinfo(skb)->frags[frag_idx].size;
1741
1742			/* If there was data in the linear part of the skb - check it */
1743 if (first_bd_sz > 0) {
1744 if (unlikely(wnd_sum < lso_mss)) {
1745 to_copy = 1;
1746 goto exit_lbl;
1747 }
1748
1749 wnd_sum -= first_bd_sz;
1750 }
1751
1752 /* Others are easier: run through the frag list and
1753 check all windows */
1754 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
1755 wnd_sum +=
1756 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
1757
1758 if (unlikely(wnd_sum < lso_mss)) {
1759 to_copy = 1;
1760 break;
1761 }
1762 wnd_sum -=
1763 skb_shinfo(skb)->frags[wnd_idx].size;
1764 }
1765 } else {
1766			/* a non-LSO packet that is too fragmented
1767			   must always be linearized */
1768 to_copy = 1;
1769 }
1770 }
1771
1772exit_lbl:
1773 if (unlikely(to_copy))
1774 DP(NETIF_MSG_TX_QUEUED,
1775 "Linearization IS REQUIRED for %s packet. "
1776 "num_frags %d hlen %d first_bd_sz %d\n",
1777 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
1778 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
1779
1780 return to_copy;
1781}
1782#endif
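
/*
 * Stand-alone sketch of the sliding-window test above: every window of
 * wnd_size consecutive buffers must carry at least one full MSS of
 * payload, otherwise the packet has to be linearized.  The helper name,
 * the window size of 3 and the buffer lengths are all invented for the
 * example.
 */
#if 0
#include <assert.h>
#include <stddef.h>

static int example_needs_linearization(const unsigned int *bd_len, size_t nbd,
					size_t wnd_size, unsigned int mss)
{
	unsigned int wnd_sum = 0;
	size_t i;

	if (nbd < wnd_size)
		return 0;

	for (i = 0; i < wnd_size; i++)		/* first window */
		wnd_sum += bd_len[i];

	for (i = wnd_size; ; i++) {
		if (wnd_sum < mss)
			return 1;		/* window too small: linearize */
		if (i == nbd)
			break;
		/* slide the window one buffer forward */
		wnd_sum += bd_len[i] - bd_len[i - wnd_size];
	}
	return 0;
}

int main(void)
{
	unsigned int ok[]  = { 1000, 1000, 1000, 1000, 1000 };
	unsigned int bad[] = { 1000,  100,  100,  100, 1000 };

	assert(!example_needs_linearization(ok, 5, 3, 1460));
	assert(example_needs_linearization(bad, 5, 3, 1460));
	return 0;
}
#endif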
1783
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001784static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
1785 struct eth_tx_parse_bd_e2 *pbd,
1786 u32 xmit_type)
1787{
1788 pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
1789 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
1790 if ((xmit_type & XMIT_GSO_V6) &&
1791 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
1792 pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
1793}
1794
1795/**
1796 * Update PBD in GSO case.
1797 *
1798 * @param skb
1799 * @param tx_start_bd
1800 * @param pbd
1801 * @param xmit_type
1802 */
1803static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
1804 struct eth_tx_parse_bd_e1x *pbd,
1805 u32 xmit_type)
1806{
1807 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1808 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
1809 pbd->tcp_flags = pbd_tcp_flags(skb);
1810
1811 if (xmit_type & XMIT_GSO_V4) {
1812 pbd->ip_id = swab16(ip_hdr(skb)->id);
1813 pbd->tcp_pseudo_csum =
1814 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1815 ip_hdr(skb)->daddr,
1816 0, IPPROTO_TCP, 0));
1817
1818 } else
1819 pbd->tcp_pseudo_csum =
1820 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1821 &ipv6_hdr(skb)->daddr,
1822 0, IPPROTO_TCP, 0));
1823
1824 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
1825}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001826
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001827/**
1828 * Set the parsing BD (E2) for checksum offload and return the header length.
1829 * @param skb
1830 * @param tx_start_bd
1831 * @param pbd_e2
1832 * @param xmit_type
1833 *
1834 * @return header len
1835 */
1836static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
1837 struct eth_tx_parse_bd_e2 *pbd,
1838 u32 xmit_type)
1839{
1840 pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
1841 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
1842
1843 pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
1844 skb->data) / 2) <<
1845 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
1846
1847 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
1848}
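
/*
 * Illustrative sketch of the field packing bnx2x_set_pbd_csum_e2() does:
 * the TCP header length (in dwords) and the TCP header start offset (in
 * 16-bit words) each land at their own bit offset inside one parsing word.
 * The EX_* shifts and the 0xff masks are invented for the example and are
 * not the E2 hardware layout.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EX_TCP_HDR_LEN_DW_SHIFT		0
#define EX_TCP_HDR_START_W_SHIFT	8

static uint32_t example_pack_parsing_data(unsigned int tcp_hdr_len,
					  unsigned int tcp_hdr_start)
{
	uint32_t data = 0;

	data |= (tcp_hdr_len / 4) << EX_TCP_HDR_LEN_DW_SHIFT;	/* dwords */
	data |= (tcp_hdr_start / 2) << EX_TCP_HDR_START_W_SHIFT;/* words */
	return data;
}

int main(void)
{
	/* 20-byte TCP header starting 34 bytes into the frame (14 + 20) */
	uint32_t data = example_pack_parsing_data(20, 34);

	assert(((data >> EX_TCP_HDR_LEN_DW_SHIFT) & 0xff) == 5);
	assert(((data >> EX_TCP_HDR_START_W_SHIFT) & 0xff) == 17);
	return 0;
}
#endif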
1849
1850/**
1851 * Set the parsing BD (E1x) for checksum offload and return the header length.
1852 * @param skb
1853 * @param tx_start_bd
1854 * @param pbd
1855 * @param xmit_type
1856 *
1857 * @return Header length
1858 */
1859static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
1860 struct eth_tx_parse_bd_e1x *pbd,
1861 u32 xmit_type)
1862{
1863 u8 hlen = (skb_network_header(skb) - skb->data) / 2;
1864
1865 /* for now NS flag is not used in Linux */
1866 pbd->global_data =
1867 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
1868 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
1869
1870 pbd->ip_hlen_w = (skb_transport_header(skb) -
1871 skb_network_header(skb)) / 2;
1872
1873 hlen += pbd->ip_hlen_w + tcp_hdrlen(skb) / 2;
1874
1875 pbd->total_hlen_w = cpu_to_le16(hlen);
1876	hlen = hlen*2;	/* convert from 16-bit words back to bytes */
1877
1878 if (xmit_type & XMIT_CSUM_TCP) {
1879 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
1880
1881 } else {
1882 s8 fix = SKB_CS_OFF(skb); /* signed! */
1883
1884 DP(NETIF_MSG_TX_QUEUED,
1885 "hlen %d fix %d csum before fix %x\n",
1886 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
1887
1888 /* HW bug: fixup the CSUM */
1889 pbd->tcp_pseudo_csum =
1890 bnx2x_csum_fix(skb_transport_header(skb),
1891 SKB_CS(skb), fix);
1892
1893 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
1894 pbd->tcp_pseudo_csum);
1895 }
1896
1897 return hlen;
1898}
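
/*
 * Small stand-alone sketch of the header-length bookkeeping in
 * bnx2x_set_pbd_csum(): the parsing-BD lengths are kept in 16-bit words
 * and the total is converted back to bytes before it is returned.  The
 * helper name is invented and the figures assume an untagged IPv4/TCP
 * frame without options.
 */
#if 0
#include <assert.h>

static unsigned int example_total_hlen_bytes(unsigned int eth_hlen,
					     unsigned int ip_hlen,
					     unsigned int tcp_hlen)
{
	unsigned int hlen_w = eth_hlen / 2;	/* MAC header, in words */

	hlen_w += ip_hlen / 2;			/* IP header, in words */
	hlen_w += tcp_hlen / 2;			/* TCP header, in words */

	return hlen_w * 2;			/* back to bytes */
}

int main(void)
{
	/* 14-byte Ethernet + 20-byte IPv4 + 20-byte TCP = 27 words = 54 bytes */
	assert(example_total_hlen_bytes(14, 20, 20) == 54);
	return 0;
}
#endif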
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001899
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001900/* called with netif_tx_lock
1901 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
1902 * netif_wake_queue()
1903 */
1904netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
1905{
1906 struct bnx2x *bp = netdev_priv(dev);
1907 struct bnx2x_fastpath *fp;
1908 struct netdev_queue *txq;
1909 struct sw_tx_bd *tx_buf;
1910 struct eth_tx_start_bd *tx_start_bd;
1911 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001912 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001913 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001914 u16 pkt_prod, bd_prod;
1915 int nbd, fp_index;
1916 dma_addr_t mapping;
1917 u32 xmit_type = bnx2x_xmit_type(bp, skb);
1918 int i;
1919 u8 hlen = 0;
1920 __le16 pkt_size = 0;
1921 struct ethhdr *eth;
1922 u8 mac_type = UNICAST_ADDRESS;
1923
1924#ifdef BNX2X_STOP_ON_ERROR
1925 if (unlikely(bp->panic))
1926 return NETDEV_TX_BUSY;
1927#endif
1928
1929 fp_index = skb_get_queue_mapping(skb);
1930 txq = netdev_get_tx_queue(dev, fp_index);
1931
1932 fp = &bp->fp[fp_index];
1933
1934 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
1935 fp->eth_q_stats.driver_xoff++;
1936 netif_tx_stop_queue(txq);
1937 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
1938 return NETDEV_TX_BUSY;
1939 }
1940
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001941 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
1942 "protocol(%x,%x) gso type %x xmit_type %x\n",
1943 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001944 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
1945
1946 eth = (struct ethhdr *)skb->data;
1947
1948 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
1949 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
1950 if (is_broadcast_ether_addr(eth->h_dest))
1951 mac_type = BROADCAST_ADDRESS;
1952 else
1953 mac_type = MULTICAST_ADDRESS;
1954 }
1955
1956#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1957 /* First, check if we need to linearize the skb (due to FW
1958 restrictions). No need to check fragmentation if page size > 8K
1959 (there will be no violation to FW restrictions) */
1960 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
1961 /* Statistics of linearization */
1962 bp->lin_cnt++;
1963 if (skb_linearize(skb) != 0) {
1964 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
1965 "silently dropping this SKB\n");
1966 dev_kfree_skb_any(skb);
1967 return NETDEV_TX_OK;
1968 }
1969 }
1970#endif
1971
1972 /*
1973 Please read carefully. First we use one BD which we mark as start,
1974 then we have a parsing info BD (used for TSO or xsum),
1975 and only then we have the rest of the TSO BDs.
1976 (don't forget to mark the last one as last,
1977 and to unmap only AFTER you write to the BD ...)
1978	And above all, all PBD sizes are in words - NOT DWORDS!
1979 */
1980
1981 pkt_prod = fp->tx_pkt_prod++;
1982 bd_prod = TX_BD(fp->tx_bd_prod);
1983
1984 /* get a tx_buf and first BD */
1985 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
1986 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
1987
1988 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001989 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
1990 mac_type);
1991
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001992 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001993 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001994
1995 /* remember the first BD of the packet */
1996 tx_buf->first_bd = fp->tx_bd_prod;
1997 tx_buf->skb = skb;
1998 tx_buf->flags = 0;
1999
2000 DP(NETIF_MSG_TX_QUEUED,
2001 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2002 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2003
Jesse Grosseab6d182010-10-20 13:56:03 +00002004 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002005 tx_start_bd->vlan_or_ethertype =
2006 cpu_to_le16(vlan_tx_tag_get(skb));
2007 tx_start_bd->bd_flags.as_bitfield |=
2008 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002009 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002010 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002011
2012 /* turn on parsing and get a BD */
2013 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002014
2015 if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002016 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2017
2018 if (xmit_type & XMIT_CSUM_V4)
2019 tx_start_bd->bd_flags.as_bitfield |=
2020 ETH_TX_BD_FLAGS_IP_CSUM;
2021 else
2022 tx_start_bd->bd_flags.as_bitfield |=
2023 ETH_TX_BD_FLAGS_IPV6;
2024
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002025 if (!(xmit_type & XMIT_CSUM_TCP))
2026 tx_start_bd->bd_flags.as_bitfield |=
2027 ETH_TX_BD_FLAGS_IS_UDP;
2028 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002029
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002030 if (CHIP_IS_E2(bp)) {
2031 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2032 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2033 /* Set PBD in checksum offload case */
2034 if (xmit_type & XMIT_CSUM)
2035 hlen = bnx2x_set_pbd_csum_e2(bp,
2036 skb, pbd_e2, xmit_type);
2037 } else {
2038 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2039 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2040 /* Set PBD in checksum offload case */
2041 if (xmit_type & XMIT_CSUM)
2042 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002043
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002044 }
2045
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002046 /* Map skb linear data for DMA */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002047 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2048 skb_headlen(skb), DMA_TO_DEVICE);
2049
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002050 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002051 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2052 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2053 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2054 tx_start_bd->nbd = cpu_to_le16(nbd);
2055 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2056 pkt_size = tx_start_bd->nbytes;
2057
2058 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2059 " nbytes %d flags %x vlan %x\n",
2060 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2061 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002062 tx_start_bd->bd_flags.as_bitfield,
2063 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002064
2065 if (xmit_type & XMIT_GSO) {
2066
2067 DP(NETIF_MSG_TX_QUEUED,
2068 "TSO packet len %d hlen %d total len %d tso size %d\n",
2069 skb->len, hlen, skb_headlen(skb),
2070 skb_shinfo(skb)->gso_size);
2071
2072 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2073
2074 if (unlikely(skb_headlen(skb) > hlen))
2075 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2076 hlen, bd_prod, ++nbd);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002077 if (CHIP_IS_E2(bp))
2078 bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
2079 else
2080 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002081 }
2082 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2083
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002084 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002085 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2086 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2087
2088 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2089 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2090 if (total_pkt_bd == NULL)
2091 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2092
2093 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2094 frag->page_offset,
2095 frag->size, DMA_TO_DEVICE);
2096
2097 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2098 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2099 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2100 le16_add_cpu(&pkt_size, frag->size);
2101
2102 DP(NETIF_MSG_TX_QUEUED,
2103 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2104 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2105 le16_to_cpu(tx_data_bd->nbytes));
2106 }
2107
2108 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2109
2110 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2111
2112 /* now send a tx doorbell, counting the next BD
2113 * if the packet contains or ends with it
2114 */
2115 if (TX_BD_POFF(bd_prod) < nbd)
2116 nbd++;
2117
2118 if (total_pkt_bd != NULL)
2119 total_pkt_bd->total_pkt_bytes = pkt_size;
2120
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002121 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002122 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002123 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002124 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002125 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2126 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2127 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2128 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002129 if (pbd_e2)
2130 DP(NETIF_MSG_TX_QUEUED,
2131 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2132 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2133 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2134 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2135 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002136 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2137
2138 /*
2139 * Make sure that the BD data is updated before updating the producer
2140 * since FW might read the BD right after the producer is updated.
2141 * This is only applicable for weak-ordered memory model archs such
2142	 * as IA-64. The following barrier is also mandatory since the FW
2143	 * assumes packets must have BDs.
2144 */
2145 wmb();
2146
2147 fp->tx_db.data.prod += nbd;
2148 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002149
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002150 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002151
2152 mmiowb();
2153
2154 fp->tx_bd_prod += nbd;
2155
2156 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2157 netif_tx_stop_queue(txq);
2158
2159 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2160 * ordering of set_bit() in netif_tx_stop_queue() and read of
2161		 * fp->tx_bd_cons */
2162 smp_mb();
2163
2164 fp->eth_q_stats.driver_xoff++;
2165 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2166 netif_tx_wake_queue(txq);
2167 }
2168 fp->tx_pkt++;
2169
2170 return NETDEV_TX_OK;
2171}
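
/*
 * Stand-alone sketch of the BD accounting in bnx2x_start_xmit(): a packet
 * consumes a start BD and a parsing BD plus one BD per fragment, and one
 * extra data BD when the TSO headers are split off the linear part.  The
 * next-page BD adjustment made just before the doorbell is ignored here;
 * the helper name is invented for the example.
 */
#if 0
#include <assert.h>

static int example_count_bds(int nr_frags, int tso_header_split)
{
	int nbd = nr_frags + 2;		/* start BD + parsing BD + frags */

	if (tso_header_split)
		nbd++;			/* extra data BD after the split */
	return nbd;
}

int main(void)
{
	assert(example_count_bds(0, 0) == 2);	/* linear-only packet */
	assert(example_count_bds(3, 0) == 5);	/* three frags */
	assert(example_count_bds(3, 1) == 6);	/* three frags + TSO split */
	return 0;
}
#endif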
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002172
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002173/* called with rtnl_lock */
2174int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2175{
2176 struct sockaddr *addr = p;
2177 struct bnx2x *bp = netdev_priv(dev);
2178
2179 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2180 return -EINVAL;
2181
2182 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002183 if (netif_running(dev))
2184 bnx2x_set_eth_mac(bp, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002185
2186 return 0;
2187}
2188
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002189
2190int bnx2x_setup_irqs(struct bnx2x *bp)
2191{
2192 int rc = 0;
2193 if (bp->flags & USING_MSIX_FLAG) {
2194 rc = bnx2x_req_msix_irqs(bp);
2195 if (rc)
2196 return rc;
2197 } else {
2198 bnx2x_ack_int(bp);
2199 rc = bnx2x_req_irq(bp);
2200 if (rc) {
2201 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2202 return rc;
2203 }
2204 if (bp->flags & USING_MSI_FLAG) {
2205 bp->dev->irq = bp->pdev->irq;
2206 netdev_info(bp->dev, "using MSI IRQ %d\n",
2207 bp->pdev->irq);
2208 }
2209 }
2210
2211 return 0;
2212}
2213
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002214void bnx2x_free_mem_bp(struct bnx2x *bp)
2215{
2216 kfree(bp->fp);
2217 kfree(bp->msix_table);
2218 kfree(bp->ilt);
2219}
2220
2221int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2222{
2223 struct bnx2x_fastpath *fp;
2224 struct msix_entry *tbl;
2225 struct bnx2x_ilt *ilt;
2226
2227 /* fp array */
2228 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2229 if (!fp)
2230 goto alloc_err;
2231 bp->fp = fp;
2232
2233 /* msix table */
2234 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl),
2235 GFP_KERNEL);
2236 if (!tbl)
2237 goto alloc_err;
2238 bp->msix_table = tbl;
2239
2240 /* ilt */
2241 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2242 if (!ilt)
2243 goto alloc_err;
2244 bp->ilt = ilt;
2245
2246 return 0;
2247alloc_err:
2248 bnx2x_free_mem_bp(bp);
2249 return -ENOMEM;
2250
2251}
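
/*
 * bnx2x_alloc_mem_bp() uses the usual "allocate everything, unwind on the
 * first failure" pattern.  A minimal user-space sketch of the same idea,
 * with calloc()/free() standing in for kzalloc()/kfree(); the struct and
 * field names are invented for the example.
 */
#if 0
#include <stdlib.h>

struct example_res {
	void *fp;
	void *msix_tbl;
	void *ilt;
};

static void example_free(struct example_res *r)
{
	/* free(NULL) is a no-op, so partially built state is fine */
	free(r->ilt);
	free(r->msix_tbl);
	free(r->fp);
}

static int example_alloc(struct example_res *r)
{
	*r = (struct example_res){ 0 };

	r->fp = calloc(1, 64);
	if (!r->fp)
		goto alloc_err;
	r->msix_tbl = calloc(1, 64);
	if (!r->msix_tbl)
		goto alloc_err;
	r->ilt = calloc(1, 64);
	if (!r->ilt)
		goto alloc_err;
	return 0;

alloc_err:
	example_free(r);
	return -1;
}
#endif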
2252
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002253/* called with rtnl_lock */
2254int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2255{
2256 struct bnx2x *bp = netdev_priv(dev);
2257 int rc = 0;
2258
2259 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2260 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2261 return -EAGAIN;
2262 }
2263
2264 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2265 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2266 return -EINVAL;
2267
2268 /* This does not race with packet allocation
2269 * because the actual alloc size is
2270 * only updated as part of load
2271 */
2272 dev->mtu = new_mtu;
2273
2274 if (netif_running(dev)) {
2275 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2276 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
2277 }
2278
2279 return rc;
2280}
2281
2282void bnx2x_tx_timeout(struct net_device *dev)
2283{
2284 struct bnx2x *bp = netdev_priv(dev);
2285
2286#ifdef BNX2X_STOP_ON_ERROR
2287 if (!bp->panic)
2288 bnx2x_panic();
2289#endif
2290	/* This allows the netif to be shut down gracefully before resetting */
2291 schedule_delayed_work(&bp->reset_task, 0);
2292}
2293
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002294int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2295{
2296 struct net_device *dev = pci_get_drvdata(pdev);
2297 struct bnx2x *bp;
2298
2299 if (!dev) {
2300 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2301 return -ENODEV;
2302 }
2303 bp = netdev_priv(dev);
2304
2305 rtnl_lock();
2306
2307 pci_save_state(pdev);
2308
2309 if (!netif_running(dev)) {
2310 rtnl_unlock();
2311 return 0;
2312 }
2313
2314 netif_device_detach(dev);
2315
2316 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2317
2318 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2319
2320 rtnl_unlock();
2321
2322 return 0;
2323}
2324
2325int bnx2x_resume(struct pci_dev *pdev)
2326{
2327 struct net_device *dev = pci_get_drvdata(pdev);
2328 struct bnx2x *bp;
2329 int rc;
2330
2331 if (!dev) {
2332 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2333 return -ENODEV;
2334 }
2335 bp = netdev_priv(dev);
2336
2337 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2338 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2339 return -EAGAIN;
2340 }
2341
2342 rtnl_lock();
2343
2344 pci_restore_state(pdev);
2345
2346 if (!netif_running(dev)) {
2347 rtnl_unlock();
2348 return 0;
2349 }
2350
2351 bnx2x_set_power_state(bp, PCI_D0);
2352 netif_device_attach(dev);
2353
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002354 /* Since the chip was reset, clear the FW sequence number */
2355 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002356 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2357
2358 rtnl_unlock();
2359
2360 return rc;
2361}