/* bnx2x_cmn.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/firmware.h>
#include "bnx2x_cmn.h"

#include "bnx2x_init.h"

static int bnx2x_setup_irqs(struct bnx2x *bp);

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
33static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
34 u16 idx)
35{
36 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
37 struct eth_tx_start_bd *tx_start_bd;
38 struct eth_tx_bd *tx_data_bd;
39 struct sk_buff *skb = tx_buf->skb;
40 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
41 int nbd;
42
43 /* prefetch skb end pointer to speedup dev_kfree_skb() */
44 prefetch(&skb->end);
45
46 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
47 idx, tx_buf, skb);
48
49 /* unmap first bd */
50 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
51 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
52 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +000053 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000054
55 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
56#ifdef BNX2X_STOP_ON_ERROR
57 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
58 BNX2X_ERR("BAD nbd!\n");
59 bnx2x_panic();
60 }
61#endif
62 new_cons = nbd + tx_buf->first_bd;
63
64 /* Get the next bd */
65 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
66
67 /* Skip a parse bd... */
68 --nbd;
69 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
70
71 /* ...and the TSO split header bd since they have no mapping */
72 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
73 --nbd;
74 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
75 }
76
77 /* now free frags */
78 while (nbd > 0) {
79
80 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
81 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
82 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
83 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
84 if (--nbd)
85 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
86 }
87
88 /* release skb */
89 WARN_ON(!skb);
90 dev_kfree_skb(skb);
91 tx_buf->first_bd = 0;
92 tx_buf->skb = NULL;
93
94 return new_cons;
95}
96
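/* Process Tx completions on a fastpath ring: walk the packets completed by
 * the chip, free their buffers, advance the consumer indices and wake the
 * netdev Tx queue if it was stopped and enough descriptors became available.
 */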
97int bnx2x_tx_int(struct bnx2x_fastpath *fp)
98{
99 struct bnx2x *bp = fp->bp;
100 struct netdev_queue *txq;
101 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
102
103#ifdef BNX2X_STOP_ON_ERROR
104 if (unlikely(bp->panic))
105 return -1;
106#endif
107
108 txq = netdev_get_tx_queue(bp->dev, fp->index);
109 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
110 sw_cons = fp->tx_pkt_cons;
111
112 while (sw_cons != hw_cons) {
113 u16 pkt_cons;
114
115 pkt_cons = TX_BD(sw_cons);
116
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000117 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
118 " pkt_cons %u\n",
119 fp->index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000120
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000121 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
122 sw_cons++;
123 }
124
125 fp->tx_pkt_cons = sw_cons;
126 fp->tx_bd_cons = bd_cons;
127
128 /* Need to make the tx_bd_cons update visible to start_xmit()
129 * before checking for netif_tx_queue_stopped(). Without the
130 * memory barrier, there is a small possibility that
131 * start_xmit() will miss it and cause the queue to be stopped
132 * forever.
133 */
134 smp_mb();
135
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000136 if (unlikely(netif_tx_queue_stopped(txq))) {
		/* Taking tx_lock() is needed to prevent re-enabling the queue
		 * while it's empty. This could happen if rx_action() gets
		 * suspended in bnx2x_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */
146
147 __netif_tx_lock(txq, smp_processor_id());
148
149 if ((netif_tx_queue_stopped(txq)) &&
150 (bp->state == BNX2X_STATE_OPEN) &&
151 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
152 netif_tx_wake_queue(txq);
153
154 __netif_tx_unlock(txq);
155 }
156 return 0;
157}
158
159static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
160 u16 idx)
161{
162 u16 last_max = fp->last_max_sge;
163
164 if (SUB_S16(idx, last_max) > 0)
165 fp->last_max_sge = idx;
166}
167
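/* Called on a TPA completion: clear the SGE mask bits for the pages consumed
 * by this CQE, update last_max_sge and advance rx_sge_prod over any fully
 * consumed mask elements.
 */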
168static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
169 struct eth_fast_path_rx_cqe *fp_cqe)
170{
171 struct bnx2x *bp = fp->bp;
172 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
173 le16_to_cpu(fp_cqe->len_on_bd)) >>
174 SGE_PAGE_SHIFT;
175 u16 last_max, last_elem, first_elem;
176 u16 delta = 0;
177 u16 i;
178
179 if (!sge_len)
180 return;
181
182 /* First mark all used pages */
183 for (i = 0; i < sge_len; i++)
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000184 SGE_MASK_CLEAR_BIT(fp,
185 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000186
187 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000188 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000189
190 /* Here we assume that the last SGE index is the biggest */
191 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000192 bnx2x_update_last_max_sge(fp,
193 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000194
195 last_max = RX_SGE(fp->last_max_sge);
196 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
197 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
198
199 /* If ring is not full */
200 if (last_elem + 1 != first_elem)
201 last_elem++;
202
203 /* Now update the prod */
204 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
205 if (likely(fp->sge_mask[i]))
206 break;
207
208 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
209 delta += RX_SGE_MASK_ELEM_SZ;
210 }
211
212 if (delta > 0) {
213 fp->rx_sge_prod += delta;
214 /* clear page-end entries */
215 bnx2x_clear_sge_mask_next_elems(fp);
216 }
217
218 DP(NETIF_MSG_RX_STATUS,
219 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
220 fp->last_max_sge, fp->rx_sge_prod);
221}
222
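/* Start a TPA aggregation: move the empty skb from the TPA pool to the
 * producer BD (and map it), park the partially filled skb from the consumer
 * in the pool, and mark the bin as BNX2X_TPA_START.
 */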
223static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
224 struct sk_buff *skb, u16 cons, u16 prod)
225{
226 struct bnx2x *bp = fp->bp;
227 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
228 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
229 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
230 dma_addr_t mapping;
231
232 /* move empty skb from pool to prod and map it */
233 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
234 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800235 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000236 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
237
238 /* move partial skb from cons to pool (don't unmap yet) */
239 fp->tpa_pool[queue] = *cons_rx_buf;
240
241 /* mark bin state as start - print error if current state != stop */
242 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
243 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
244
245 fp->tpa_state[queue] = BNX2X_TPA_START;
246
247 /* point prod_bd to new skb */
248 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
249 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
250
251#ifdef BNX2X_STOP_ON_ERROR
252 fp->tpa_queue_used |= (1 << queue);
253#ifdef _ASM_GENERIC_INT_L64_H
254 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
255#else
256 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
257#endif
258 fp->tpa_queue_used);
259#endif
260}
261
/* Timestamp option length allowed for TPA aggregation:
 *
 * nop nop kind length echo val
 */
#define TPA_TSTAMP_OPT_LEN	12
/**
 * Calculate the approximate value of the MSS for this
 * aggregation using the first packet of it.
 *
 * @param bp
 * @param parsing_flags	Parsing flags from the START CQE
 * @param len_on_bd	Total length of the first packet for the
 *			aggregation.
 */
static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
				    u16 len_on_bd)
{
	/* A TPA aggregation won't have IP options or TCP options
	 * other than the timestamp.
	 */
	u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);

	/* Check if there is a TCP timestamp; if there is, it will
	 * always be 12 bytes long: nop nop kind length echo val.
	 *
	 * Otherwise the FW would have closed the aggregation.
	 */
	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
		hdrs_len += TPA_TSTAMP_OPT_LEN;

	return len_on_bd - hdrs_len;
}
295
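/* Attach the SGE pages of an aggregation to the skb as page fragments,
 * replacing each consumed page in the SGE ring; gso_size is set from the
 * estimated MSS so that forwarding of the aggregated packet works.
 */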
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000296static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
297 struct sk_buff *skb,
298 struct eth_fast_path_rx_cqe *fp_cqe,
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000299 u16 cqe_idx, u16 parsing_flags)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000300{
301 struct sw_rx_page *rx_pg, old_rx_pg;
302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
303 u32 i, frag_len, frag_size, pages;
304 int err;
305 int j;
306
307 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
308 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
309
310 /* This is needed in order to enable forwarding support */
311 if (frag_size)
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000312 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
313 len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000314
315#ifdef BNX2X_STOP_ON_ERROR
316 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
317 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
318 pages, cqe_idx);
319 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
320 fp_cqe->pkt_len, len_on_bd);
321 bnx2x_panic();
322 return -EINVAL;
323 }
324#endif
325
326 /* Run through the SGL and compose the fragmented skb */
327 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000328 u16 sge_idx =
329 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000330
331 /* FW gives the indices of the SGE as if the ring is an array
332 (meaning that "next" element will consume 2 indices) */
333 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
334 rx_pg = &fp->rx_page_ring[sge_idx];
335 old_rx_pg = *rx_pg;
336
337 /* If we fail to allocate a substitute page, we simply stop
338 where we are and drop the whole packet */
339 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
340 if (unlikely(err)) {
341 fp->eth_q_stats.rx_skb_alloc_failed++;
342 return err;
343 }
344
		/* Unmap the page as we are going to pass it to the stack */
346 dma_unmap_page(&bp->pdev->dev,
347 dma_unmap_addr(&old_rx_pg, mapping),
348 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
349
350 /* Add one frag and update the appropriate fields in the skb */
351 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
352
353 skb->data_len += frag_len;
354 skb->truesize += frag_len;
355 skb->len += frag_len;
356
357 frag_size -= frag_len;
358 }
359
360 return 0;
361}
362
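/* Complete a TPA aggregation: unmap the pooled skb, fix the IP checksum,
 * attach the SGE fragments and pass the skb up via napi_gro_receive(), then
 * refill the bin with a newly allocated skb (or account a drop on failure).
 */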
363static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
364 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
365 u16 cqe_idx)
366{
367 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
368 struct sk_buff *skb = rx_buf->skb;
369 /* alloc new skb */
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800370 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000371
372 /* Unmap skb in the pool anyway, as we are going to change
373 pool entry status to BNX2X_TPA_STOP even if new skb allocation
374 fails. */
375 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800376 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000377
378 if (likely(new_skb)) {
379 /* fix ip xsum and give it to the stack */
380 /* (no need to map the new skb) */
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000381 u16 parsing_flags =
382 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000383
384 prefetch(skb);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000385 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000386
387#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800388 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000389 BNX2X_ERR("skb_put is about to fail... "
390 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800391 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000392 bnx2x_panic();
393 return;
394 }
395#endif
396
397 skb_reserve(skb, pad);
398 skb_put(skb, len);
399
400 skb->protocol = eth_type_trans(skb, bp->dev);
401 skb->ip_summed = CHECKSUM_UNNECESSARY;
402
403 {
404 struct iphdr *iph;
405
406 iph = (struct iphdr *)skb->data;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000407 iph->check = 0;
408 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
409 }
410
411 if (!bnx2x_fill_frag_skb(bp, fp, skb,
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000412 &cqe->fast_path_cqe, cqe_idx,
413 parsing_flags)) {
414 if (parsing_flags & PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000415 __vlan_hwaccel_put_tag(skb,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000416 le16_to_cpu(cqe->fast_path_cqe.
Hao Zheng9bcc0892010-10-20 13:56:11 +0000417 vlan_tag));
418 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000419 } else {
420 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
421 " - dropping packet!\n");
422 dev_kfree_skb(skb);
423 }
424
425
426 /* put new skb in bin */
427 fp->tpa_pool[queue].skb = new_skb;
428
429 } else {
430 /* else drop the packet and keep the buffer in the bin */
431 DP(NETIF_MSG_RX_STATUS,
432 "Failed to allocate new skb - dropping packet!\n");
433 fp->eth_q_stats.rx_skb_alloc_failed++;
434 }
435
436 fp->tpa_state[queue] = BNX2X_TPA_STOP;
437}
438
439/* Set Toeplitz hash value in the skb using the value from the
440 * CQE (calculated by HW).
441 */
442static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
443 struct sk_buff *skb)
444{
445 /* Set Toeplitz hash from CQE */
446 if ((bp->dev->features & NETIF_F_RXHASH) &&
447 (cqe->fast_path_cqe.status_flags &
448 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
449 skb->rxhash =
450 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
451}
452
453int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
454{
455 struct bnx2x *bp = fp->bp;
456 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
457 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
458 int rx_pkt = 0;
459
460#ifdef BNX2X_STOP_ON_ERROR
461 if (unlikely(bp->panic))
462 return 0;
463#endif
464
465 /* CQ "next element" is of the size of the regular element,
466 that's why it's ok here */
467 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
468 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
469 hw_comp_cons++;
470
471 bd_cons = fp->rx_bd_cons;
472 bd_prod = fp->rx_bd_prod;
473 bd_prod_fw = bd_prod;
474 sw_comp_cons = fp->rx_comp_cons;
475 sw_comp_prod = fp->rx_comp_prod;
476
477 /* Memory barrier necessary as speculative reads of the rx
478 * buffer can be ahead of the index in the status block
479 */
480 rmb();
481
482 DP(NETIF_MSG_RX_STATUS,
483 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
484 fp->index, hw_comp_cons, sw_comp_cons);
485
486 while (sw_comp_cons != hw_comp_cons) {
487 struct sw_rx_bd *rx_buf = NULL;
488 struct sk_buff *skb;
489 union eth_rx_cqe *cqe;
490 u8 cqe_fp_flags;
491 u16 len, pad;
492
493 comp_ring_cons = RCQ_BD(sw_comp_cons);
494 bd_prod = RX_BD(bd_prod);
495 bd_cons = RX_BD(bd_cons);
496
497 /* Prefetch the page containing the BD descriptor
498 at producer's index. It will be needed when new skb is
499 allocated */
500 prefetch((void *)(PAGE_ALIGN((unsigned long)
501 (&fp->rx_desc_ring[bd_prod])) -
502 PAGE_SIZE + 1));
503
504 cqe = &fp->rx_comp_ring[comp_ring_cons];
505 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
506
507 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
508 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
509 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
510 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
511 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
512 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
513
514 /* is this a slowpath msg? */
515 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
516 bnx2x_sp_event(fp, cqe);
517 goto next_cqe;
518
519 /* this is an rx packet */
520 } else {
521 rx_buf = &fp->rx_buf_ring[bd_cons];
522 skb = rx_buf->skb;
523 prefetch(skb);
524 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
525 pad = cqe->fast_path_cqe.placement_offset;
526
Vladislav Zolotarovfe78d262010-10-17 23:02:20 +0000527 /* - If CQE is marked both TPA_START and TPA_END it is
528 * a non-TPA CQE.
529 * - FP CQE will always have either TPA_START or/and
530 * TPA_STOP flags set.
531 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000532 if ((!fp->disable_tpa) &&
533 (TPA_TYPE(cqe_fp_flags) !=
534 (TPA_TYPE_START | TPA_TYPE_END))) {
535 u16 queue = cqe->fast_path_cqe.queue_index;
536
537 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
538 DP(NETIF_MSG_RX_STATUS,
539 "calling tpa_start on queue %d\n",
540 queue);
541
542 bnx2x_tpa_start(fp, queue, skb,
543 bd_cons, bd_prod);
544
545 /* Set Toeplitz hash for an LRO skb */
546 bnx2x_set_skb_rxhash(bp, cqe, skb);
547
548 goto next_rx;
Vladislav Zolotarovfe78d262010-10-17 23:02:20 +0000549 } else { /* TPA_STOP */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000550 DP(NETIF_MSG_RX_STATUS,
551 "calling tpa_stop on queue %d\n",
552 queue);
553
554 if (!BNX2X_RX_SUM_FIX(cqe))
					BNX2X_ERR("STOP on non-TCP "
						  "data\n");
557
558 /* This is a size of the linear data
559 on this skb */
560 len = le16_to_cpu(cqe->fast_path_cqe.
561 len_on_bd);
562 bnx2x_tpa_stop(bp, fp, queue, pad,
563 len, cqe, comp_ring_cons);
564#ifdef BNX2X_STOP_ON_ERROR
565 if (bp->panic)
566 return 0;
567#endif
568
569 bnx2x_update_sge_prod(fp,
570 &cqe->fast_path_cqe);
571 goto next_cqe;
572 }
573 }
574
575 dma_sync_single_for_device(&bp->pdev->dev,
576 dma_unmap_addr(rx_buf, mapping),
577 pad + RX_COPY_THRESH,
578 DMA_FROM_DEVICE);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000579 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000580
581 /* is this an error packet? */
582 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
583 DP(NETIF_MSG_RX_ERR,
584 "ERROR flags %x rx packet %u\n",
585 cqe_fp_flags, sw_comp_cons);
586 fp->eth_q_stats.rx_err_discard_pkt++;
587 goto reuse_rx;
588 }
589
590 /* Since we don't have a jumbo ring
591 * copy small packets if mtu > 1500
592 */
593 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
594 (len <= RX_COPY_THRESH)) {
595 struct sk_buff *new_skb;
596
597 new_skb = netdev_alloc_skb(bp->dev,
598 len + pad);
599 if (new_skb == NULL) {
600 DP(NETIF_MSG_RX_ERR,
601 "ERROR packet dropped "
602 "because of alloc failure\n");
603 fp->eth_q_stats.rx_skb_alloc_failed++;
604 goto reuse_rx;
605 }
606
607 /* aligned copy */
608 skb_copy_from_linear_data_offset(skb, pad,
609 new_skb->data + pad, len);
610 skb_reserve(new_skb, pad);
611 skb_put(new_skb, len);
612
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000613 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000614
615 skb = new_skb;
616
617 } else
618 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
619 dma_unmap_single(&bp->pdev->dev,
620 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800621 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000622 DMA_FROM_DEVICE);
623 skb_reserve(skb, pad);
624 skb_put(skb, len);
625
626 } else {
627 DP(NETIF_MSG_RX_ERR,
628 "ERROR packet dropped because "
629 "of alloc failure\n");
630 fp->eth_q_stats.rx_skb_alloc_failed++;
631reuse_rx:
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000632 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000633 goto next_rx;
634 }
635
636 skb->protocol = eth_type_trans(skb, bp->dev);
637
			/* Set Toeplitz hash for a non-LRO skb */
639 bnx2x_set_skb_rxhash(bp, cqe, skb);
640
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700641 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000642
Michał Mirosław66371c42011-04-12 09:38:23 +0000643 if (bp->dev->features & NETIF_F_RXCSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000644 if (likely(BNX2X_RX_CSUM_OK(cqe)))
645 skb->ip_summed = CHECKSUM_UNNECESSARY;
646 else
647 fp->eth_q_stats.hw_csum_err++;
648 }
649 }
650
651 skb_record_rx_queue(skb, fp->index);
652
Hao Zheng9bcc0892010-10-20 13:56:11 +0000653 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
654 PARSING_FLAGS_VLAN)
655 __vlan_hwaccel_put_tag(skb,
656 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
657 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000658
659
660next_rx:
661 rx_buf->skb = NULL;
662
663 bd_cons = NEXT_RX_IDX(bd_cons);
664 bd_prod = NEXT_RX_IDX(bd_prod);
665 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
666 rx_pkt++;
667next_cqe:
668 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
669 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
670
671 if (rx_pkt == budget)
672 break;
673 } /* while */
674
675 fp->rx_bd_cons = bd_cons;
676 fp->rx_bd_prod = bd_prod_fw;
677 fp->rx_comp_cons = sw_comp_cons;
678 fp->rx_comp_prod = sw_comp_prod;
679
680 /* Update producers */
681 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
682 fp->rx_sge_prod);
683
684 fp->rx_pkt += rx_pkt;
685 fp->rx_calls++;
686
687 return rx_pkt;
688}
689
690static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
691{
692 struct bnx2x_fastpath *fp = fp_cookie;
693 struct bnx2x *bp = fp->bp;
694
695 /* Return here if interrupt is disabled */
696 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
697 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
698 return IRQ_HANDLED;
699 }
700
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000701 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
702 "[fp %d fw_sd %d igusb %d]\n",
703 fp->index, fp->fw_sb_id, fp->igu_sb_id);
704 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000705
706#ifdef BNX2X_STOP_ON_ERROR
707 if (unlikely(bp->panic))
708 return IRQ_HANDLED;
709#endif
710
711 /* Handle Rx and Tx according to MSI-X vector */
712 prefetch(fp->rx_cons_sb);
713 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000714 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000715 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
716
717 return IRQ_HANDLED;
718}
719
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000720/* HW Lock for shared dual port PHYs */
721void bnx2x_acquire_phy_lock(struct bnx2x *bp)
722{
723 mutex_lock(&bp->port.phy_mutex);
724
725 if (bp->port.need_hw_lock)
726 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
727}
728
729void bnx2x_release_phy_lock(struct bnx2x *bp)
730{
731 if (bp->port.need_hw_lock)
732 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
733
734 mutex_unlock(&bp->port.phy_mutex);
735}
736
/* Calculates the MF speed according to the current line speed and the MF
 * configuration.
 */
738u16 bnx2x_get_mf_speed(struct bnx2x *bp)
739{
740 u16 line_speed = bp->link_vars.line_speed;
741 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000742 u16 maxCfg = bnx2x_extract_max_cfg(bp,
743 bp->mf_config[BP_VN(bp)]);
744
745 /* Calculate the current MAX line speed limit for the MF
746 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800747 */
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000748 if (IS_MF_SI(bp))
749 line_speed = (line_speed * maxCfg) / 100;
750 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800751 u16 vn_max_rate = maxCfg * 100;
752
753 if (vn_max_rate < line_speed)
754 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000755 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800756 }
757
758 return line_speed;
759}
760
/**
 * bnx2x_fill_report_data - fill link report data to report
 *
 * @bp:		driver handle
 * @data:	link state to update
 *
 * It uses non-atomic bit operations because it is called under the mutex.
 */
769static inline void bnx2x_fill_report_data(struct bnx2x *bp,
770 struct bnx2x_link_report_data *data)
771{
772 u16 line_speed = bnx2x_get_mf_speed(bp);
773
774 memset(data, 0, sizeof(*data));
775
	/* Fill the report data: effective line speed */
777 data->line_speed = line_speed;
778
779 /* Link is down */
780 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
781 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
782 &data->link_report_flags);
783
784 /* Full DUPLEX */
785 if (bp->link_vars.duplex == DUPLEX_FULL)
786 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
787
788 /* Rx Flow Control is ON */
789 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
790 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
791
792 /* Tx Flow Control is ON */
793 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
794 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
795}
796
/**
 * bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Calls __bnx2x_link_report() under the same locking scheme
 * as the link/PHY state managing code to ensure consistent link
 * reporting.
 */
806
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000807void bnx2x_link_report(struct bnx2x *bp)
808{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000809 bnx2x_acquire_phy_lock(bp);
810 __bnx2x_link_report(bp);
811 bnx2x_release_phy_lock(bp);
812}
813
/**
 * __bnx2x_link_report - report link status to OS.
 *
 * @bp:		driver handle
 *
 * Non-atomic implementation.
 * Should be called under the phy_lock.
 */
822void __bnx2x_link_report(struct bnx2x *bp)
823{
824 struct bnx2x_link_report_data cur_data;
825
826 /* reread mf_cfg */
827 if (!CHIP_IS_E1(bp))
828 bnx2x_read_mf_cfg(bp);
829
830 /* Read the current link report info */
831 bnx2x_fill_report_data(bp, &cur_data);
832
833 /* Don't report link down or exactly the same link status twice */
834 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
835 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
836 &bp->last_reported_link.link_report_flags) &&
837 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
838 &cur_data.link_report_flags)))
839 return;
840
841 bp->link_cnt++;
842
	/* We are going to report new link parameters now -
	 * remember the current data for the next time.
	 */
846 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
847
848 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
849 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000850 netif_carrier_off(bp->dev);
851 netdev_err(bp->dev, "NIC Link is Down\n");
852 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000853 } else {
854 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000855 netdev_info(bp->dev, "NIC Link is Up, ");
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000856 pr_cont("%d Mbps ", cur_data.line_speed);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000857
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000858 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
859 &cur_data.link_report_flags))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000860 pr_cont("full duplex");
861 else
862 pr_cont("half duplex");
863
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000864 /* Handle the FC at the end so that only these flags would be
865 * possibly set. This way we may easily check if there is no FC
866 * enabled.
867 */
868 if (cur_data.link_report_flags) {
869 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
870 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000871 pr_cont(", receive ");
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000872 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
873 &cur_data.link_report_flags))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000874 pr_cont("& transmit ");
875 } else {
876 pr_cont(", transmit ");
877 }
878 pr_cont("flow control ON");
879 }
880 pr_cont("\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000881 }
882}
883
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000884/* Returns the number of actually allocated BDs */
885static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
886 int rx_ring_size)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000887{
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000888 struct bnx2x *bp = fp->bp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000889 u16 ring_prod, cqe_ring_prod;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000890 int i;
891
892 fp->rx_comp_cons = 0;
893 cqe_ring_prod = ring_prod = 0;
894 for (i = 0; i < rx_ring_size; i++) {
895 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
896 BNX2X_ERR("was only able to allocate "
897 "%d rx skbs on queue[%d]\n", i, fp->index);
898 fp->eth_q_stats.rx_skb_alloc_failed++;
899 break;
900 }
901 ring_prod = NEXT_RX_IDX(ring_prod);
902 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
903 WARN_ON(ring_prod <= i);
904 }
905
906 fp->rx_bd_prod = ring_prod;
907 /* Limit the CQE producer by the CQE ring size */
908 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
909 cqe_ring_prod);
910 fp->rx_pkt = fp->rx_calls = 0;
911
912 return i;
913}
914
915static inline void bnx2x_alloc_rx_bd_ring(struct bnx2x_fastpath *fp)
916{
917 struct bnx2x *bp = fp->bp;
Dmitry Kravkov25141582010-09-12 05:48:28 +0000918 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
919 MAX_RX_AVAIL/bp->num_queues;
920
921 rx_ring_size = max_t(int, MIN_RX_AVAIL, rx_ring_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000922
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000923 bnx2x_alloc_rx_bds(fp, rx_ring_size);
924
925 /* Warning!
926 * this will generate an interrupt (to the TSTORM)
927 * must only be done after chip is initialized
928 */
929 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
930 fp->rx_sge_prod);
931}
932
933void bnx2x_init_rx_rings(struct bnx2x *bp)
934{
935 int func = BP_FUNC(bp);
936 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
937 ETH_MAX_AGGREGATION_QUEUES_E1H;
938 u16 ring_prod;
939 int i, j;
940
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000941 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000942 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000943
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800944 DP(NETIF_MSG_IFUP,
945 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
946
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000947 if (!fp->disable_tpa) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000948 for (i = 0; i < max_agg_queues; i++) {
949 fp->tpa_pool[i].skb =
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800950 netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000951 if (!fp->tpa_pool[i].skb) {
952 BNX2X_ERR("Failed to allocate TPA "
953 "skb pool for queue[%d] - "
954 "disabling TPA on this "
955 "queue!\n", j);
956 bnx2x_free_tpa_pool(bp, fp, i);
957 fp->disable_tpa = 1;
958 break;
959 }
960 dma_unmap_addr_set((struct sw_rx_bd *)
961 &bp->fp->tpa_pool[i],
962 mapping, 0);
963 fp->tpa_state[i] = BNX2X_TPA_STOP;
964 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000965
966 /* "next page" elements initialization */
967 bnx2x_set_next_page_sgl(fp);
968
969 /* set SGEs bit mask */
970 bnx2x_init_sge_ring_bit_mask(fp);
971
972 /* Allocate SGEs and initialize the ring elements */
973 for (i = 0, ring_prod = 0;
974 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
975
976 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
977 BNX2X_ERR("was only able to allocate "
978 "%d rx sges\n", i);
979 BNX2X_ERR("disabling TPA for"
980 " queue[%d]\n", j);
981 /* Cleanup already allocated elements */
982 bnx2x_free_rx_sge_range(bp,
983 fp, ring_prod);
984 bnx2x_free_tpa_pool(bp,
985 fp, max_agg_queues);
986 fp->disable_tpa = 1;
987 ring_prod = 0;
988 break;
989 }
990 ring_prod = NEXT_SGE_IDX(ring_prod);
991 }
992
993 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000994 }
995 }
996
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000997 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000998 struct bnx2x_fastpath *fp = &bp->fp[j];
999
1000 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001001
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001002 bnx2x_set_next_page_rx_bd(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001003
1004 /* CQ ring */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001005 bnx2x_set_next_page_rx_cq(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001006
1007 /* Allocate BDs and initialize BD ring */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001008 bnx2x_alloc_rx_bd_ring(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001009
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001010 if (j != 0)
1011 continue;
1012
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001013 if (!CHIP_IS_E2(bp)) {
1014 REG_WR(bp, BAR_USTRORM_INTMEM +
1015 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1016 U64_LO(fp->rx_comp_mapping));
1017 REG_WR(bp, BAR_USTRORM_INTMEM +
1018 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1019 U64_HI(fp->rx_comp_mapping));
1020 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001021 }
1022}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001023
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001024static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1025{
1026 int i;
1027
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001028 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001029 struct bnx2x_fastpath *fp = &bp->fp[i];
1030
1031 u16 bd_cons = fp->tx_bd_cons;
1032 u16 sw_prod = fp->tx_pkt_prod;
1033 u16 sw_cons = fp->tx_pkt_cons;
1034
1035 while (sw_cons != sw_prod) {
1036 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1037 sw_cons++;
1038 }
1039 }
1040}
1041
1042static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1043{
1044 int i, j;
1045
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001046 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001047 struct bnx2x_fastpath *fp = &bp->fp[j];
1048
1049 for (i = 0; i < NUM_RX_BD; i++) {
1050 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1051 struct sk_buff *skb = rx_buf->skb;
1052
1053 if (skb == NULL)
1054 continue;
1055
1056 dma_unmap_single(&bp->pdev->dev,
1057 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001058 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001059
1060 rx_buf->skb = NULL;
1061 dev_kfree_skb(skb);
1062 }
1063 if (!fp->disable_tpa)
1064 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1065 ETH_MAX_AGGREGATION_QUEUES_E1 :
1066 ETH_MAX_AGGREGATION_QUEUES_E1H);
1067 }
1068}
1069
1070void bnx2x_free_skbs(struct bnx2x *bp)
1071{
1072 bnx2x_free_tx_skbs(bp);
1073 bnx2x_free_rx_skbs(bp);
1074}
1075
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001076void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1077{
1078 /* load old values */
1079 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1080
1081 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1082 /* leave all but MAX value */
1083 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1084
1085 /* set new MAX value */
1086 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1087 & FUNC_MF_CFG_MAX_BW_MASK;
1088
1089 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1090 }
1091}
1092
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001093static void bnx2x_free_msix_irqs(struct bnx2x *bp)
1094{
1095 int i, offset = 1;
1096
1097 free_irq(bp->msix_table[0].vector, bp->dev);
1098 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1099 bp->msix_table[0].vector);
1100
1101#ifdef BCM_CNIC
1102 offset++;
1103#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001104 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001105 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
1106 "state %x\n", i, bp->msix_table[i + offset].vector,
1107 bnx2x_fp(bp, i, state));
1108
1109 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
1110 }
1111}
1112
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001113void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001114{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001115 if (bp->flags & USING_MSIX_FLAG)
1116 bnx2x_free_msix_irqs(bp);
1117 else if (bp->flags & USING_MSI_FLAG)
1118 free_irq(bp->pdev->irq, bp->dev);
1119 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001120 free_irq(bp->pdev->irq, bp->dev);
1121}
1122
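/* Request MSI-X vectors for the slowpath, CNIC (when BCM_CNIC is set) and
 * each eth queue.  If fewer vectors are granted, the number of queues is
 * reduced to match; on failure an error is returned so the caller may fall
 * back to MSI/INTx.
 */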
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001123int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001124{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001125 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001126
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001127 bp->msix_table[msix_vec].entry = msix_vec;
1128 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1129 bp->msix_table[0].entry);
1130 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001131
1132#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001133 bp->msix_table[msix_vec].entry = msix_vec;
1134 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1135 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1136 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001137#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001138 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001139 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001140 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001141 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1142 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001143 }
1144
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001145 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001146
1147 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001148
1149 /*
1150 * reconfigure number of tx/rx queues according to available
1151 * MSI-X vectors
1152 */
1153 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
		/* how many fewer vectors will we have? */
1155 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001156
1157 DP(NETIF_MSG_IFUP,
1158 "Trying to use less MSI-X vectors: %d\n", rc);
1159
1160 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1161
1162 if (rc) {
1163 DP(NETIF_MSG_IFUP,
1164 "MSI-X is not attainable rc %d\n", rc);
1165 return rc;
1166 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001167 /*
1168 * decrease number of queues by number of unallocated entries
1169 */
1170 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001171
1172 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1173 bp->num_queues);
1174 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001175 /* fall to INTx if not enough memory */
1176 if (rc == -ENOMEM)
1177 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001178 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1179 return rc;
1180 }
1181
1182 bp->flags |= USING_MSIX_FLAG;
1183
1184 return 0;
1185}
1186
1187static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1188{
1189 int i, rc, offset = 1;
1190
1191 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
1192 bp->dev->name, bp->dev);
1193 if (rc) {
1194 BNX2X_ERR("request sp irq failed\n");
1195 return -EBUSY;
1196 }
1197
1198#ifdef BCM_CNIC
1199 offset++;
1200#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001201 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001202 struct bnx2x_fastpath *fp = &bp->fp[i];
1203 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1204 bp->dev->name, i);
1205
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001206 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001207 bnx2x_msix_fp_int, 0, fp->name, fp);
1208 if (rc) {
1209 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
1210 bnx2x_free_msix_irqs(bp);
1211 return -EBUSY;
1212 }
1213
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001214 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001215 fp->state = BNX2X_FP_STATE_IRQ;
1216 }
1217
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001218 i = BNX2X_NUM_ETH_QUEUES(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001219 offset = 1 + CNIC_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001220 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1221 " ... fp[%d] %d\n",
1222 bp->msix_table[0].vector,
1223 0, bp->msix_table[offset].vector,
1224 i - 1, bp->msix_table[offset + i - 1].vector);
1225
1226 return 0;
1227}
1228
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001229int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001230{
1231 int rc;
1232
1233 rc = pci_enable_msi(bp->pdev);
1234 if (rc) {
1235 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1236 return -1;
1237 }
1238 bp->flags |= USING_MSI_FLAG;
1239
1240 return 0;
1241}
1242
1243static int bnx2x_req_irq(struct bnx2x *bp)
1244{
1245 unsigned long flags;
1246 int rc;
1247
1248 if (bp->flags & USING_MSI_FLAG)
1249 flags = 0;
1250 else
1251 flags = IRQF_SHARED;
1252
1253 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1254 bp->dev->name, bp->dev);
1255 if (!rc)
1256 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1257
1258 return rc;
1259}
1260
1261static void bnx2x_napi_enable(struct bnx2x *bp)
1262{
1263 int i;
1264
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001265 for_each_napi_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001266 napi_enable(&bnx2x_fp(bp, i, napi));
1267}
1268
1269static void bnx2x_napi_disable(struct bnx2x *bp)
1270{
1271 int i;
1272
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001273 for_each_napi_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001274 napi_disable(&bnx2x_fp(bp, i, napi));
1275}
1276
1277void bnx2x_netif_start(struct bnx2x *bp)
1278{
1279 int intr_sem;
1280
1281 intr_sem = atomic_dec_and_test(&bp->intr_sem);
1282 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
1283
1284 if (intr_sem) {
1285 if (netif_running(bp->dev)) {
1286 bnx2x_napi_enable(bp);
1287 bnx2x_int_enable(bp);
1288 if (bp->state == BNX2X_STATE_OPEN)
1289 netif_tx_wake_all_queues(bp->dev);
1290 }
1291 }
1292}
1293
1294void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1295{
1296 bnx2x_int_disable_sync(bp, disable_hw);
1297 bnx2x_napi_disable(bp);
1298 netif_tx_disable(bp->dev);
1299}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001300
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001301u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1302{
1303#ifdef BCM_CNIC
1304 struct bnx2x *bp = netdev_priv(dev);
1305 if (NO_FCOE(bp))
1306 return skb_tx_hash(dev, skb);
1307 else {
1308 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1309 u16 ether_type = ntohs(hdr->h_proto);
1310
1311 /* Skip VLAN tag if present */
1312 if (ether_type == ETH_P_8021Q) {
1313 struct vlan_ethhdr *vhdr =
1314 (struct vlan_ethhdr *)skb->data;
1315
1316 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1317 }
1318
1319 /* If ethertype is FCoE or FIP - use FCoE ring */
1320 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1321 return bnx2x_fcoe(bp, index);
1322 }
1323#endif
	/* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring */
1326 return __skb_tx_hash(dev, skb,
1327 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1328}
1329
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001330void bnx2x_set_num_queues(struct bnx2x *bp)
1331{
1332 switch (bp->multi_mode) {
1333 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001334 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001335 break;
1336 case ETH_RSS_MODE_REGULAR:
1337 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001338 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001339
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001340 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001341 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001342 break;
1343 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001344
1345 /* Add special queues */
1346 bp->num_queues += NONE_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001347}
1348
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001349#ifdef BCM_CNIC
1350static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1351{
1352 if (!NO_FCOE(bp)) {
1353 if (!IS_MF_SD(bp))
1354 bnx2x_set_fip_eth_mac_addr(bp, 1);
1355 bnx2x_set_all_enode_macs(bp, 1);
1356 bp->flags |= FCOE_MACS_SET;
1357 }
1358}
1359#endif
1360
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001361static void bnx2x_release_firmware(struct bnx2x *bp)
1362{
1363 kfree(bp->init_ops_offsets);
1364 kfree(bp->init_ops);
1365 kfree(bp->init_data);
1366 release_firmware(bp->firmware);
1367}
1368
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001369static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1370{
1371 int rc, num = bp->num_queues;
1372
1373#ifdef BCM_CNIC
1374 if (NO_FCOE(bp))
1375 num -= FCOE_CONTEXT_USE;
1376
1377#endif
1378 netif_set_real_num_tx_queues(bp->dev, num);
1379 rc = netif_set_real_num_rx_queues(bp->dev, num);
1380 return rc;
1381}
1382
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001383static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1384{
1385 int i;
1386
1387 for_each_queue(bp, i) {
1388 struct bnx2x_fastpath *fp = &bp->fp[i];
1389
1390 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1391 if (IS_FCOE_IDX(i))
1392 /*
			 * Although no IP frames are expected to arrive on
			 * this ring, we still want to add an
1395 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1396 * overrun attack.
1397 */
1398 fp->rx_buf_size =
1399 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1400 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1401 else
1402 fp->rx_buf_size =
1403 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1404 IP_HEADER_ALIGNMENT_PADDING;
1405 }
1406}
1407
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001408/* must be called with rtnl_lock */
1409int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1410{
1411 u32 load_code;
1412 int i, rc;
1413
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001414 /* Set init arrays */
1415 rc = bnx2x_init_firmware(bp);
1416 if (rc) {
1417 BNX2X_ERR("Error loading firmware\n");
1418 return rc;
1419 }
1420
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001421#ifdef BNX2X_STOP_ON_ERROR
1422 if (unlikely(bp->panic))
1423 return -EPERM;
1424#endif
1425
1426 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1427
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001428 /* Set the initial link reported state to link down */
1429 bnx2x_acquire_phy_lock(bp);
1430 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1431 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1432 &bp->last_reported_link.link_report_flags);
1433 bnx2x_release_phy_lock(bp);
1434
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001435 /* must be called before memory allocation and HW init */
1436 bnx2x_ilt_set_info(bp);
1437
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001438 /* Set the receive queues buffer size */
1439 bnx2x_set_rx_buf_size(bp);
1440
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001441 if (bnx2x_alloc_mem(bp))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001442 return -ENOMEM;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001443
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001444 rc = bnx2x_set_real_num_queues(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001445 if (rc) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001446 BNX2X_ERR("Unable to set real_num_queues\n");
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001447 goto load_error0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001448 }
1449
1450 for_each_queue(bp, i)
1451 bnx2x_fp(bp, i, disable_tpa) =
1452 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1453
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001454#ifdef BCM_CNIC
1455 /* We don't want TPA on FCoE L2 ring */
1456 bnx2x_fcoe(bp, disable_tpa) = 1;
1457#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001458 bnx2x_napi_enable(bp);
1459
	/* Send LOAD_REQUEST command to MCP.
	 * Returns the type of LOAD command:
	 * if it is the first port to be initialized
	 * common blocks should be initialized, otherwise - not
	 */
1465 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001466 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001467 if (!load_code) {
1468 BNX2X_ERR("MCP response failure, aborting\n");
1469 rc = -EBUSY;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001470 goto load_error1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001471 }
1472 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1473 rc = -EBUSY; /* other port in diagnostic mode */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001474 goto load_error1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001475 }
1476
1477 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001478 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001479 int port = BP_PORT(bp);
1480
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001481 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1482 path, load_count[path][0], load_count[path][1],
1483 load_count[path][2]);
1484 load_count[path][0]++;
1485 load_count[path][1 + port]++;
1486 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1487 path, load_count[path][0], load_count[path][1],
1488 load_count[path][2]);
1489 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001490 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001491 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001492 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1493 else
1494 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1495 }
1496
1497 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001498 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001499 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1500 bp->port.pmf = 1;
1501 else
1502 bp->port.pmf = 0;
1503 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1504
1505 /* Initialize HW */
1506 rc = bnx2x_init_hw(bp, load_code);
1507 if (rc) {
1508 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001509 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001510 goto load_error2;
1511 }
1512
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001513 /* Connect to IRQs */
1514 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001515 if (rc) {
1516 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1517 goto load_error2;
1518 }
1519
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001520 /* Setup NIC internals and enable interrupts */
1521 bnx2x_nic_init(bp, load_code);
1522
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001523 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1524 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001525 (bp->common.shmem2_base))
1526 SHMEM2_WR(bp, dcc_support,
1527 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1528 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1529
1530 /* Send LOAD_DONE command to MCP */
1531 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001532 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001533 if (!load_code) {
1534 BNX2X_ERR("MCP response failure, aborting\n");
1535 rc = -EBUSY;
1536 goto load_error3;
1537 }
1538 }
1539
Vladislav Zolotarove4901dd2010-12-13 05:44:18 +00001540 bnx2x_dcbx_init(bp);
1541
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001542 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1543
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001544 rc = bnx2x_func_start(bp);
1545 if (rc) {
1546 BNX2X_ERR("Function start failed!\n");
1547#ifndef BNX2X_STOP_ON_ERROR
1548 goto load_error3;
1549#else
1550 bp->panic = 1;
1551 return -EBUSY;
1552#endif
1553 }
1554
1555 rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001556 if (rc) {
1557 BNX2X_ERR("Setup leading failed!\n");
1558#ifndef BNX2X_STOP_ON_ERROR
1559 goto load_error3;
1560#else
1561 bp->panic = 1;
1562 return -EBUSY;
1563#endif
1564 }
1565
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001566 if (!CHIP_IS_E1(bp) &&
1567 (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1568 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1569 bp->flags |= MF_FUNC_DIS;
1570 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001571
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001572#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001573 /* Enable Timer scan */
1574 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001575#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001576
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001577 for_each_nondefault_queue(bp, i) {
1578 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1579 if (rc)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001580#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001581 goto load_error4;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001582#else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001583 goto load_error3;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001584#endif
1585 }
1586
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001587 /* Now when Clients are configured we are ready to work */
1588 bp->state = BNX2X_STATE_OPEN;
1589
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001590#ifdef BCM_CNIC
1591 bnx2x_set_fcoe_eth_macs(bp);
1592#endif
1593
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001594 bnx2x_set_eth_mac(bp, 1);
1595
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001596 /* Clear MC configuration */
1597 if (CHIP_IS_E1(bp))
1598 bnx2x_invalidate_e1_mc_list(bp);
1599 else
1600 bnx2x_invalidate_e1h_mc_list(bp);
1601
1602 /* Clear UC lists configuration */
1603 bnx2x_invalidate_uc_list(bp);
1604
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001605 if (bp->pending_max) {
1606 bnx2x_update_max_mf_config(bp, bp->pending_max);
1607 bp->pending_max = 0;
1608 }
1609
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001610 if (bp->port.pmf)
1611 bnx2x_initial_phy_init(bp, load_mode);
1612
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001613 /* Initialize Rx filtering */
1614 bnx2x_set_rx_mode(bp->dev);
1615
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001616 /* Start fast path */
1617 switch (load_mode) {
1618 case LOAD_NORMAL:
		/* Tx queues should only be re-enabled */
1620 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001621		/* The Rx filter was already initialized above. */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001622 break;
1623
1624 case LOAD_OPEN:
1625 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001626 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001627 break;
1628
1629 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001630 bp->state = BNX2X_STATE_DIAG;
1631 break;
1632
1633 default:
1634 break;
1635 }
1636
1637 if (!bp->port.pmf)
1638 bnx2x__link_status_update(bp);
1639
1640 /* start the timer */
1641 mod_timer(&bp->timer, jiffies + bp->current_interval);
1642
1643#ifdef BCM_CNIC
1644 bnx2x_setup_cnic_irq_info(bp);
1645 if (bp->state == BNX2X_STATE_OPEN)
1646 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1647#endif
1648 bnx2x_inc_load_cnt(bp);
1649
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001650 bnx2x_release_firmware(bp);
1651
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001652 return 0;
1653
1654#ifdef BCM_CNIC
1655load_error4:
1656 /* Disable Timer scan */
1657 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1658#endif
1659load_error3:
1660 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001661
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001662 /* Free SKBs, SGEs, TPA pool and driver internals */
1663 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001664 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001665 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001666
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001667 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001668 bnx2x_free_irq(bp);
1669load_error2:
1670 if (!BP_NOMCP(bp)) {
1671 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1672 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1673 }
1674
1675 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001676load_error1:
1677 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001678load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001679 bnx2x_free_mem(bp);
1680
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001681 bnx2x_release_firmware(bp);
1682
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001683 return rc;
1684}
1685
1686/* must be called with rtnl_lock */
1687int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1688{
1689 int i;
1690
1691 if (bp->state == BNX2X_STATE_CLOSED) {
1692 /* Interface has been removed - nothing to recover */
1693 bp->recovery_state = BNX2X_RECOVERY_DONE;
1694 bp->is_leader = 0;
1695 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1696 smp_wmb();
1697
1698 return -EINVAL;
1699 }
1700
1701#ifdef BCM_CNIC
1702 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1703#endif
1704 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1705
1706 /* Set "drop all" */
1707 bp->rx_mode = BNX2X_RX_MODE_NONE;
1708 bnx2x_set_storm_rx_mode(bp);
1709
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001710 /* Stop Tx */
1711 bnx2x_tx_disable(bp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001712
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001713 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001714
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001715 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001716 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001717
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001718 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001719
1720 /* Cleanup the chip if needed */
1721 if (unload_mode != UNLOAD_RECOVERY)
1722 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001723 else {
1724 /* Disable HW interrupts, NAPI and Tx */
1725 bnx2x_netif_stop(bp, 1);
1726
1727 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001728 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001729 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001730
1731 bp->port.pmf = 0;
1732
1733 /* Free SKBs, SGEs, TPA pool and driver internals */
1734 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001735 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001736 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001737
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001738 bnx2x_free_mem(bp);
1739
1740 bp->state = BNX2X_STATE_CLOSED;
1741
1742	/* The last driver must disable the "close the gate" functionality if
1743	 * there is no parity attention or "process kill" pending.
1744	 */
1745 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1746 bnx2x_reset_is_done(bp))
1747 bnx2x_disable_close_the_gate(bp);
1748
1749	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
1750 if (unload_mode == UNLOAD_RECOVERY)
1751 bp->fw_seq = 0;
1752
1753 return 0;
1754}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001755
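/* Move the chip between PCI power states (D0/D3hot) by rewriting the PM
 * control/status register. The D3hot transition is skipped while other
 * functions still have the device enabled and on emulation/FPGA chips.
 */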
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001756int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1757{
1758 u16 pmcsr;
1759
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00001760 /* If there is no power capability, silently succeed */
1761 if (!bp->pm_cap) {
1762 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1763 return 0;
1764 }
1765
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001766 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1767
1768 switch (state) {
1769 case PCI_D0:
1770 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1771 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1772 PCI_PM_CTRL_PME_STATUS));
1773
1774 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1775 /* delay required during transition out of D3hot */
1776 msleep(20);
1777 break;
1778
1779 case PCI_D3hot:
1780		/* If there are other clients above, don't
1781		   shut down the power */
1782 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1783 return 0;
1784 /* Don't shut down the power for emulation and FPGA */
1785 if (CHIP_REV_IS_SLOW(bp))
1786 return 0;
1787
1788 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1789 pmcsr |= 3;
1790
1791 if (bp->wol)
1792 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1793
1794 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1795 pmcsr);
1796
1797 /* No more memory access after this point until
1798 * device is brought back to D0.
1799 */
1800 break;
1801
1802 default:
1803 return -EINVAL;
1804 }
1805 return 0;
1806}
1807
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001808/*
1809 * net_device service functions
1810 */
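/* NAPI poll handler for one fastpath: drains Tx completions and Rx work,
 * then re-arms the status block interrupt via bnx2x_ack_sb() once both
 * rings are idle (see the rmb() comment below for the ordering rules).
 */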
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001811int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001812{
1813 int work_done = 0;
1814 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1815 napi);
1816 struct bnx2x *bp = fp->bp;
1817
1818 while (1) {
1819#ifdef BNX2X_STOP_ON_ERROR
1820 if (unlikely(bp->panic)) {
1821 napi_complete(napi);
1822 return 0;
1823 }
1824#endif
1825
1826 if (bnx2x_has_tx_work(fp))
1827 bnx2x_tx_int(fp);
1828
1829 if (bnx2x_has_rx_work(fp)) {
1830 work_done += bnx2x_rx_int(fp, budget - work_done);
1831
1832 /* must not complete if we consumed full budget */
1833 if (work_done >= budget)
1834 break;
1835 }
1836
1837 /* Fall out from the NAPI loop if needed */
1838 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001839#ifdef BCM_CNIC
1840 /* No need to update SB for FCoE L2 ring as long as
1841 * it's connected to the default SB and the SB
1842 * has been updated when NAPI was scheduled.
1843 */
1844 if (IS_FCOE_FP(fp)) {
1845 napi_complete(napi);
1846 break;
1847 }
1848#endif
1849
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001850 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001851 /* bnx2x_has_rx_work() reads the status block,
1852 * thus we need to ensure that status block indices
1853			 * have actually been read (bnx2x_update_fpsb_idx)
1854 * prior to this check (bnx2x_has_rx_work) so that
1855 * we won't write the "newer" value of the status block
1856 * to IGU (if there was a DMA right after
1857 * bnx2x_has_rx_work and if there is no rmb, the memory
1858 * reading (bnx2x_update_fpsb_idx) may be postponed
1859 * to right before bnx2x_ack_sb). In this case there
1860 * will never be another interrupt until there is
1861 * another update of the status block, while there
1862 * is still unhandled work.
1863 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001864 rmb();
1865
1866 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1867 napi_complete(napi);
1868 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001869 DP(NETIF_MSG_HW,
1870 "Update index to %d\n", fp->fp_hc_idx);
1871 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1872 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001873 IGU_INT_ENABLE, 1);
1874 break;
1875 }
1876 }
1877 }
1878
1879 return work_done;
1880}
1881
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001882/* We split the first BD into a header BD and a data BD
1883 * to ease the pain of our fellow microcode engineers;
1884 * we use one mapping for both BDs.
1885 * So far this has only been observed to happen
1886 * in Other Operating Systems(TM).
1887 */
1888static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1889 struct bnx2x_fastpath *fp,
1890 struct sw_tx_bd *tx_buf,
1891 struct eth_tx_start_bd **tx_bd, u16 hlen,
1892 u16 bd_prod, int nbd)
1893{
1894 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1895 struct eth_tx_bd *d_tx_bd;
1896 dma_addr_t mapping;
1897 int old_len = le16_to_cpu(h_tx_bd->nbytes);
1898
1899 /* first fix first BD */
1900 h_tx_bd->nbd = cpu_to_le16(nbd);
1901 h_tx_bd->nbytes = cpu_to_le16(hlen);
1902
1903 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1904 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1905 h_tx_bd->addr_lo, h_tx_bd->nbd);
1906
1907 /* now get a new data BD
1908 * (after the pbd) and fill it */
1909 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1910 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1911
1912 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1913 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1914
1915 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1916 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1917 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1918
1919 /* this marks the BD as one that has no individual mapping */
1920 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1921
1922 DP(NETIF_MSG_TX_QUEUED,
1923 "TSO split data size is %d (%x:%x)\n",
1924 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1925
1926 /* update tx_bd */
1927 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1928
1929 return bd_prod;
1930}
1931
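/* Adjust a partial checksum whose start offset differs from the transport
 * header by 'fix' bytes: subtract (fix > 0) or add (fix < 0) the checksum
 * of those bytes and return the result byte-swapped for the parsing BD.
 */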
1932static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1933{
1934 if (fix > 0)
1935 csum = (u16) ~csum_fold(csum_sub(csum,
1936 csum_partial(t_header - fix, fix, 0)));
1937
1938 else if (fix < 0)
1939 csum = (u16) ~csum_fold(csum_add(csum,
1940 csum_partial(t_header, -fix, 0)));
1941
1942 return swab16(csum);
1943}
1944
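/* Classify an skb for transmit: returns XMIT_PLAIN for packets without
 * checksum offload, otherwise a combination of XMIT_CSUM_V4/V6 and
 * XMIT_CSUM_TCP, plus XMIT_GSO_V4/V6 for segmented packets.
 */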
1945static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1946{
1947 u32 rc;
1948
1949 if (skb->ip_summed != CHECKSUM_PARTIAL)
1950 rc = XMIT_PLAIN;
1951
1952 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00001953 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001954 rc = XMIT_CSUM_V6;
1955 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1956 rc |= XMIT_CSUM_TCP;
1957
1958 } else {
1959 rc = XMIT_CSUM_V4;
1960 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1961 rc |= XMIT_CSUM_TCP;
1962 }
1963 }
1964
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00001965 if (skb_is_gso_v6(skb))
1966 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1967 else if (skb_is_gso(skb))
1968 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001969
1970 return rc;
1971}
1972
1973#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
1974/* Check if the packet requires linearization (i.e. it is too fragmented).
1975   There is no need to check fragmentation if page size > 8K (there will be
1976   no violation of FW restrictions). */
1977static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
1978 u32 xmit_type)
1979{
1980 int to_copy = 0;
1981 int hlen = 0;
1982 int first_bd_sz = 0;
1983
1984 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
1985 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
1986
1987 if (xmit_type & XMIT_GSO) {
1988 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
1989 /* Check if LSO packet needs to be copied:
1990 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
1991 int wnd_size = MAX_FETCH_BD - 3;
1992 /* Number of windows to check */
1993 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
1994 int wnd_idx = 0;
1995 int frag_idx = 0;
1996 u32 wnd_sum = 0;
1997
1998 /* Headers length */
1999 hlen = (int)(skb_transport_header(skb) - skb->data) +
2000 tcp_hdrlen(skb);
2001
2002			/* Amount of data (w/o headers) on the linear part of the SKB */
2003 first_bd_sz = skb_headlen(skb) - hlen;
2004
2005 wnd_sum = first_bd_sz;
2006
2007 /* Calculate the first sum - it's special */
2008 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2009 wnd_sum +=
2010 skb_shinfo(skb)->frags[frag_idx].size;
2011
2012 /* If there was data on linear skb data - check it */
2013 if (first_bd_sz > 0) {
2014 if (unlikely(wnd_sum < lso_mss)) {
2015 to_copy = 1;
2016 goto exit_lbl;
2017 }
2018
2019 wnd_sum -= first_bd_sz;
2020 }
2021
2022 /* Others are easier: run through the frag list and
2023 check all windows */
2024 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2025 wnd_sum +=
2026 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2027
2028 if (unlikely(wnd_sum < lso_mss)) {
2029 to_copy = 1;
2030 break;
2031 }
2032 wnd_sum -=
2033 skb_shinfo(skb)->frags[wnd_idx].size;
2034 }
2035 } else {
2036			/* in the non-LSO case, a too fragmented packet should
2037			   always be linearized */
2038 to_copy = 1;
2039 }
2040 }
2041
2042exit_lbl:
2043 if (unlikely(to_copy))
2044 DP(NETIF_MSG_TX_QUEUED,
2045 "Linearization IS REQUIRED for %s packet. "
2046 "num_frags %d hlen %d first_bd_sz %d\n",
2047 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2048 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2049
2050 return to_copy;
2051}
2052#endif
2053
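/* On E2 chips the LSO MSS (and the IPv6 extension header indication) are
 * encoded into the parse BD's parsing_data word instead of dedicated fields.
 */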
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002054static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2055 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002056{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002057 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2058 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2059 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002060 if ((xmit_type & XMIT_GSO_V6) &&
2061 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002062 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002063}
2064
2065/**
2066 * Update PBD in GSO case.
2067 *
2068 * @param skb
2070 * @param pbd
2071 * @param xmit_type
2072 */
2073static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2074 struct eth_tx_parse_bd_e1x *pbd,
2075 u32 xmit_type)
2076{
2077 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2078 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2079 pbd->tcp_flags = pbd_tcp_flags(skb);
2080
2081 if (xmit_type & XMIT_GSO_V4) {
2082 pbd->ip_id = swab16(ip_hdr(skb)->id);
2083 pbd->tcp_pseudo_csum =
2084 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2085 ip_hdr(skb)->daddr,
2086 0, IPPROTO_TCP, 0));
2087
2088 } else
2089 pbd->tcp_pseudo_csum =
2090 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2091 &ipv6_hdr(skb)->daddr,
2092 0, IPPROTO_TCP, 0));
2093
2094 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2095}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002096
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002097/**
2098 * Set up checksum offload in the E2 parsing data.
2099 *
2100 * @param bp
2101 * @param skb
2102 * @param parsing_data
2103 * @param xmit_type
2104 *
2105 * @return header len
2106 */
2106static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002107 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002108{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002109 *parsing_data |=
2110 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2111 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2112 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002113
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002114 if (xmit_type & XMIT_CSUM_TCP) {
2115 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2116 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2117 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002118
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002119 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2120 } else
2121 /* We support checksum offload for TCP and UDP only.
2122 * No need to pass the UDP header length - it's a constant.
2123 */
2124 return skb_transport_header(skb) +
2125 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002126}
2127
2128/**
2129 * Set up checksum offload in the E1x parsing BD.
2130 *
2131 * @param bp
2132 * @param skb
2133 * @param pbd
2134 * @param xmit_type
2135 *
2136 * @return Header length
2137 */
2137static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2138 struct eth_tx_parse_bd_e1x *pbd,
2139 u32 xmit_type)
2140{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002141 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002142
2143 /* for now NS flag is not used in Linux */
2144 pbd->global_data =
2145 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2146 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2147
2148 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002149 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002150
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002151 hlen += pbd->ip_hlen_w;
2152
2153 /* We support checksum offload for TCP and UDP only */
2154 if (xmit_type & XMIT_CSUM_TCP)
2155 hlen += tcp_hdrlen(skb) / 2;
2156 else
2157 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002158
2159 pbd->total_hlen_w = cpu_to_le16(hlen);
2160 hlen = hlen*2;
2161
2162 if (xmit_type & XMIT_CSUM_TCP) {
2163 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2164
2165 } else {
2166 s8 fix = SKB_CS_OFF(skb); /* signed! */
2167
2168 DP(NETIF_MSG_TX_QUEUED,
2169 "hlen %d fix %d csum before fix %x\n",
2170 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2171
2172 /* HW bug: fixup the CSUM */
2173 pbd->tcp_pseudo_csum =
2174 bnx2x_csum_fix(skb_transport_header(skb),
2175 SKB_CS(skb), fix);
2176
2177 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2178 pbd->tcp_pseudo_csum);
2179 }
2180
2181 return hlen;
2182}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002183
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002184/* called with netif_tx_lock
2185 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2186 * netif_wake_queue()
2187 */
2188netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2189{
2190 struct bnx2x *bp = netdev_priv(dev);
2191 struct bnx2x_fastpath *fp;
2192 struct netdev_queue *txq;
2193 struct sw_tx_bd *tx_buf;
2194 struct eth_tx_start_bd *tx_start_bd;
2195 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002196 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002197 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002198 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002199 u16 pkt_prod, bd_prod;
2200 int nbd, fp_index;
2201 dma_addr_t mapping;
2202 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2203 int i;
2204 u8 hlen = 0;
2205 __le16 pkt_size = 0;
2206 struct ethhdr *eth;
2207 u8 mac_type = UNICAST_ADDRESS;
2208
2209#ifdef BNX2X_STOP_ON_ERROR
2210 if (unlikely(bp->panic))
2211 return NETDEV_TX_BUSY;
2212#endif
2213
2214 fp_index = skb_get_queue_mapping(skb);
2215 txq = netdev_get_tx_queue(dev, fp_index);
2216
2217 fp = &bp->fp[fp_index];
2218
2219 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2220 fp->eth_q_stats.driver_xoff++;
2221 netif_tx_stop_queue(txq);
2222 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2223 return NETDEV_TX_BUSY;
2224 }
2225
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002226 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2227 "protocol(%x,%x) gso type %x xmit_type %x\n",
2228 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002229 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2230
2231 eth = (struct ethhdr *)skb->data;
2232
2233 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2234 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2235 if (is_broadcast_ether_addr(eth->h_dest))
2236 mac_type = BROADCAST_ADDRESS;
2237 else
2238 mac_type = MULTICAST_ADDRESS;
2239 }
2240
2241#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2242 /* First, check if we need to linearize the skb (due to FW
2243 restrictions). No need to check fragmentation if page size > 8K
2244	   (there will be no violation of FW restrictions) */
2245 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2246 /* Statistics of linearization */
2247 bp->lin_cnt++;
2248 if (skb_linearize(skb) != 0) {
2249 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2250 "silently dropping this SKB\n");
2251 dev_kfree_skb_any(skb);
2252 return NETDEV_TX_OK;
2253 }
2254 }
2255#endif
2256
2257 /*
2258 Please read carefully. First we use one BD which we mark as start,
2259 then we have a parsing info BD (used for TSO or xsum),
2260 and only then we have the rest of the TSO BDs.
2261 (don't forget to mark the last one as last,
2262 and to unmap only AFTER you write to the BD ...)
2263	And above all, all PBD sizes are in words - NOT DWORDS!
2264 */
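	/* For example, a TSO packet whose linear data is longer than its
	 * headers ends up as: start BD (headers), parsing BD (E1x or E2),
	 * one split data BD for the rest of the linear data, and then one
	 * data BD per page fragment.
	 */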
2265
2266 pkt_prod = fp->tx_pkt_prod++;
2267 bd_prod = TX_BD(fp->tx_bd_prod);
2268
2269 /* get a tx_buf and first BD */
2270 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2271 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2272
2273 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002274 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2275 mac_type);
2276
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002277 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002278 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002279
2280 /* remember the first BD of the packet */
2281 tx_buf->first_bd = fp->tx_bd_prod;
2282 tx_buf->skb = skb;
2283 tx_buf->flags = 0;
2284
2285 DP(NETIF_MSG_TX_QUEUED,
2286 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2287 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2288
Jesse Grosseab6d182010-10-20 13:56:03 +00002289 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002290 tx_start_bd->vlan_or_ethertype =
2291 cpu_to_le16(vlan_tx_tag_get(skb));
2292 tx_start_bd->bd_flags.as_bitfield |=
2293 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002294 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002295 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002296
2297 /* turn on parsing and get a BD */
2298 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002299
2300 if (xmit_type & XMIT_CSUM) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002301 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2302
2303 if (xmit_type & XMIT_CSUM_V4)
2304 tx_start_bd->bd_flags.as_bitfield |=
2305 ETH_TX_BD_FLAGS_IP_CSUM;
2306 else
2307 tx_start_bd->bd_flags.as_bitfield |=
2308 ETH_TX_BD_FLAGS_IPV6;
2309
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002310 if (!(xmit_type & XMIT_CSUM_TCP))
2311 tx_start_bd->bd_flags.as_bitfield |=
2312 ETH_TX_BD_FLAGS_IS_UDP;
2313 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002314
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002315 if (CHIP_IS_E2(bp)) {
2316 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2317 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2318 /* Set PBD in checksum offload case */
2319 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002320 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2321 &pbd_e2_parsing_data,
2322 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002323 } else {
2324 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2325 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2326 /* Set PBD in checksum offload case */
2327 if (xmit_type & XMIT_CSUM)
2328 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002329
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002330 }
2331
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002332 /* Map skb linear data for DMA */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002333 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2334 skb_headlen(skb), DMA_TO_DEVICE);
2335
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002336 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002337 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2338 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2339 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2340 tx_start_bd->nbd = cpu_to_le16(nbd);
2341 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2342 pkt_size = tx_start_bd->nbytes;
2343
2344 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2345 " nbytes %d flags %x vlan %x\n",
2346 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2347 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002348 tx_start_bd->bd_flags.as_bitfield,
2349 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002350
2351 if (xmit_type & XMIT_GSO) {
2352
2353 DP(NETIF_MSG_TX_QUEUED,
2354 "TSO packet len %d hlen %d total len %d tso size %d\n",
2355 skb->len, hlen, skb_headlen(skb),
2356 skb_shinfo(skb)->gso_size);
2357
2358 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2359
2360 if (unlikely(skb_headlen(skb) > hlen))
2361 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2362 hlen, bd_prod, ++nbd);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002363 if (CHIP_IS_E2(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002364 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2365 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002366 else
2367 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002368 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002369
2370 /* Set the PBD's parsing_data field if not zero
2371 * (for the chips newer than 57711).
2372 */
2373 if (pbd_e2_parsing_data)
2374 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2375
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002376 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2377
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002378 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002379 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2380 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2381
2382 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2383 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2384 if (total_pkt_bd == NULL)
2385 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2386
2387 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2388 frag->page_offset,
2389 frag->size, DMA_TO_DEVICE);
2390
2391 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2392 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2393 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2394 le16_add_cpu(&pkt_size, frag->size);
2395
2396 DP(NETIF_MSG_TX_QUEUED,
2397 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2398 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2399 le16_to_cpu(tx_data_bd->nbytes));
2400 }
2401
2402 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2403
2404 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2405
2406 /* now send a tx doorbell, counting the next BD
2407 * if the packet contains or ends with it
2408 */
2409 if (TX_BD_POFF(bd_prod) < nbd)
2410 nbd++;
2411
2412 if (total_pkt_bd != NULL)
2413 total_pkt_bd->total_pkt_bytes = pkt_size;
2414
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002415 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002416 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002417 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002418 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002419 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2420 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2421 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2422 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002423 if (pbd_e2)
2424 DP(NETIF_MSG_TX_QUEUED,
2425 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2426 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2427 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2428 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2429 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002430 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2431
2432 /*
2433 * Make sure that the BD data is updated before updating the producer
2434 * since FW might read the BD right after the producer is updated.
2435 * This is only applicable for weak-ordered memory model archs such
2436	 * as IA-64. The following barrier is also mandatory since the FW
2437	 * assumes packets must have BDs.
2438 */
2439 wmb();
2440
2441 fp->tx_db.data.prod += nbd;
2442 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002443
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002444 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002445
2446 mmiowb();
2447
2448 fp->tx_bd_prod += nbd;
2449
2450 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2451 netif_tx_stop_queue(txq);
2452
2453		/* The paired memory barrier is in bnx2x_tx_int(); we have to keep
2454		 * the ordering of set_bit() in netif_tx_stop_queue() and the read of
2455		 * fp->tx_bd_cons */
2456 smp_mb();
2457
2458 fp->eth_q_stats.driver_xoff++;
2459 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2460 netif_tx_wake_queue(txq);
2461 }
2462 fp->tx_pkt++;
2463
2464 return NETDEV_TX_OK;
2465}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002466
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002467/* called with rtnl_lock */
2468int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2469{
2470 struct sockaddr *addr = p;
2471 struct bnx2x *bp = netdev_priv(dev);
2472
2473 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2474 return -EINVAL;
2475
2476 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002477 if (netif_running(dev))
2478 bnx2x_set_eth_mac(bp, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002479
2480 return 0;
2481}
2482
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002483
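/* Request the interrupts the driver will use: the MSI-X vectors when MSI-X
 * is enabled, otherwise a single MSI or legacy INTx line.
 */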
stephen hemminger8d962862010-10-21 07:50:56 +00002484static int bnx2x_setup_irqs(struct bnx2x *bp)
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002485{
2486 int rc = 0;
2487 if (bp->flags & USING_MSIX_FLAG) {
2488 rc = bnx2x_req_msix_irqs(bp);
2489 if (rc)
2490 return rc;
2491 } else {
2492 bnx2x_ack_int(bp);
2493 rc = bnx2x_req_irq(bp);
2494 if (rc) {
2495 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
2496 return rc;
2497 }
2498 if (bp->flags & USING_MSI_FLAG) {
2499 bp->dev->irq = bp->pdev->irq;
2500 netdev_info(bp->dev, "using MSI IRQ %d\n",
2501 bp->pdev->irq);
2502 }
2503 }
2504
2505 return 0;
2506}
2507
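/* Free the per-device bookkeeping allocated by bnx2x_alloc_mem_bp():
 * the fastpath array, the MSI-X entry table and the ILT descriptor.
 */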
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002508void bnx2x_free_mem_bp(struct bnx2x *bp)
2509{
2510 kfree(bp->fp);
2511 kfree(bp->msix_table);
2512 kfree(bp->ilt);
2513}
2514
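/* Allocate the per-device bookkeeping structures; on any failure everything
 * allocated so far is released and -ENOMEM is returned.
 */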
2515int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2516{
2517 struct bnx2x_fastpath *fp;
2518 struct msix_entry *tbl;
2519 struct bnx2x_ilt *ilt;
2520
2521 /* fp array */
2522 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2523 if (!fp)
2524 goto alloc_err;
2525 bp->fp = fp;
2526
2527 /* msix table */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002528 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002529 GFP_KERNEL);
2530 if (!tbl)
2531 goto alloc_err;
2532 bp->msix_table = tbl;
2533
2534 /* ilt */
2535 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2536 if (!ilt)
2537 goto alloc_err;
2538 bp->ilt = ilt;
2539
2540 return 0;
2541alloc_err:
2542 bnx2x_free_mem_bp(bp);
2543 return -ENOMEM;
2544
2545}
2546
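/* Helper used by the MTU and feature callbacks below: if the interface is
 * up, tear the NIC down and load it again so the new settings take effect.
 */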
Michał Mirosław66371c42011-04-12 09:38:23 +00002547static int bnx2x_reload_if_running(struct net_device *dev)
2548{
2549 struct bnx2x *bp = netdev_priv(dev);
2550
2551 if (unlikely(!netif_running(dev)))
2552 return 0;
2553
2554 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2555 return bnx2x_nic_load(bp, LOAD_NORMAL);
2556}
2557
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002558/* called with rtnl_lock */
2559int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2560{
2561 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002562
2563 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2564 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2565 return -EAGAIN;
2566 }
2567
2568 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2569 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2570 return -EINVAL;
2571
2572 /* This does not race with packet allocation
2573 * because the actual alloc size is
2574 * only updated as part of load
2575 */
2576 dev->mtu = new_mtu;
2577
Michał Mirosław66371c42011-04-12 09:38:23 +00002578 return bnx2x_reload_if_running(dev);
2579}
2580
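/* LRO (TPA) depends on Rx checksum offload, so NETIF_F_LRO is cleared
 * whenever RXCSUM is disabled or TPA has been turned off for this device.
 */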
2581u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2582{
2583 struct bnx2x *bp = netdev_priv(dev);
2584
2585 /* TPA requires Rx CSUM offloading */
2586 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2587 features &= ~NETIF_F_LRO;
2588
2589 return features;
2590}
2591
2592int bnx2x_set_features(struct net_device *dev, u32 features)
2593{
2594 struct bnx2x *bp = netdev_priv(dev);
2595 u32 flags = bp->flags;
2596
2597 if (features & NETIF_F_LRO)
2598 flags |= TPA_ENABLE_FLAG;
2599 else
2600 flags &= ~TPA_ENABLE_FLAG;
2601
2602 if (flags ^ bp->flags) {
2603 bp->flags = flags;
2604
2605 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2606 return bnx2x_reload_if_running(dev);
2607 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002608 }
2609
Michał Mirosław66371c42011-04-12 09:38:23 +00002610 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002611}
2612
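/* netdev watchdog callback: schedule the reset task so the chip is
 * reinitialized from process context (or panic when BNX2X_STOP_ON_ERROR
 * is set).
 */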
2613void bnx2x_tx_timeout(struct net_device *dev)
2614{
2615 struct bnx2x *bp = netdev_priv(dev);
2616
2617#ifdef BNX2X_STOP_ON_ERROR
2618 if (!bp->panic)
2619 bnx2x_panic();
2620#endif
2621 /* This allows the netif to be shutdown gracefully before resetting */
2622 schedule_delayed_work(&bp->reset_task, 0);
2623}
2624
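/* PCI suspend handler: under rtnl_lock, detach the netdev, unload the NIC
 * with UNLOAD_CLOSE and drop to the power state chosen by the PCI core.
 */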
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002625int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2626{
2627 struct net_device *dev = pci_get_drvdata(pdev);
2628 struct bnx2x *bp;
2629
2630 if (!dev) {
2631 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2632 return -ENODEV;
2633 }
2634 bp = netdev_priv(dev);
2635
2636 rtnl_lock();
2637
2638 pci_save_state(pdev);
2639
2640 if (!netif_running(dev)) {
2641 rtnl_unlock();
2642 return 0;
2643 }
2644
2645 netif_device_detach(dev);
2646
2647 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2648
2649 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2650
2651 rtnl_unlock();
2652
2653 return 0;
2654}
2655
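/* PCI resume handler: restore PCI state, bring the chip back to D0, clear
 * the FW sequence number and reload the NIC; refused with -EAGAIN while a
 * parity error recovery is still in progress.
 */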
2656int bnx2x_resume(struct pci_dev *pdev)
2657{
2658 struct net_device *dev = pci_get_drvdata(pdev);
2659 struct bnx2x *bp;
2660 int rc;
2661
2662 if (!dev) {
2663 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2664 return -ENODEV;
2665 }
2666 bp = netdev_priv(dev);
2667
2668 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2669 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2670 return -EAGAIN;
2671 }
2672
2673 rtnl_lock();
2674
2675 pci_restore_state(pdev);
2676
2677 if (!netif_running(dev)) {
2678 rtnl_unlock();
2679 return 0;
2680 }
2681
2682 bnx2x_set_power_state(bp, PCI_D0);
2683 netif_device_attach(dev);
2684
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002685 /* Since the chip was reset, clear the FW sequence number */
2686 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002687 rc = bnx2x_nic_load(bp, LOAD_OPEN);
2688
2689 rtnl_unlock();
2690
2691 return rc;
2692}