Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001/* bnx2x_cmn.c: Broadcom Everest network driver.
2 *
Dmitry Kravkov5de92402011-05-04 23:51:13 +00003 * Copyright (c) 2007-2011 Broadcom Corporation
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
15 *
16 */
17
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000018#include <linux/etherdevice.h>
Hao Zheng9bcc0892010-10-20 13:56:11 +000019#include <linux/if_vlan.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000020#include <linux/interrupt.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000021#include <linux/ip.h>
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000022#include <net/ipv6.h>
Stephen Rothwell7f3e01f2010-07-28 22:20:34 -070023#include <net/ip6_checksum.h>
Dmitry Kravkov6891dd22010-08-03 21:49:40 +000024#include <linux/firmware.h>
Paul Gortmakerc0cba592011-05-22 11:02:08 +000025#include <linux/prefetch.h>
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000026#include "bnx2x_cmn.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000027#include "bnx2x_init.h"
Vladislav Zolotarov042181f2011-06-14 01:33:39 +000028#include "bnx2x_sp.h"
Dmitry Kravkov523224a2010-10-06 03:23:26 +000029
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030030
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000031
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +000032/**
33 * bnx2x_bz_fp - zero content of the fastpath structure.
34 *
35 * @bp: driver handle
36 * @index: fastpath index to be zeroed
37 *
38 * Makes sure the contents of bp->fp[index].napi are kept
39 * intact.
40 */
41static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42{
43 struct bnx2x_fastpath *fp = &bp->fp[index];
44 struct napi_struct orig_napi = fp->napi;
45 /* bzero bnx2x_fastpath contents */
46 memset(fp, 0, sizeof(*fp));
47
48 /* Restore the NAPI object as it has been already initialized */
49 fp->napi = orig_napi;
50}
51
52/**
53 * bnx2x_move_fp - move content of the fastpath structure.
54 *
55 * @bp: driver handle
56 * @from: source FP index
57 * @to: destination FP index
58 *
59 * Makes sure the contents of bp->fp[to].napi are kept
60 * intact.
61 */
62static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
63{
64 struct bnx2x_fastpath *from_fp = &bp->fp[from];
65 struct bnx2x_fastpath *to_fp = &bp->fp[to];
66 struct napi_struct orig_napi = to_fp->napi;
67 /* Move bnx2x_fastpath contents */
68 memcpy(to_fp, from_fp, sizeof(*to_fp));
69 to_fp->index = to;
70
71 /* Restore the NAPI object as it has been already initialized */
72 to_fp->napi = orig_napi;
73}
74
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030075int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
76
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000077/* free skb in the packet ring at pos idx
78 * return idx of last bd freed
79 */
80static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
81 u16 idx)
82{
83 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
84 struct eth_tx_start_bd *tx_start_bd;
85 struct eth_tx_bd *tx_data_bd;
86 struct sk_buff *skb = tx_buf->skb;
87 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
88 int nbd;
89
90 /* prefetch skb end pointer to speed up dev_kfree_skb() */
91 prefetch(&skb->end);
92
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +030093 DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
94 fp->index, idx, tx_buf, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000095
96 /* unmap first bd */
97 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
98 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
99 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
Dmitry Kravkov4bca60f2010-10-06 03:30:27 +0000100 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000101
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300102
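	/* tx_start_bd->nbd is the total number of BDs this packet occupies:
	 * the start BD, a parse BD, an optional TSO split header BD and the
	 * frag BDs; only the start BD and the frag BDs carry DMA mappings.
	 */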
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000103 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
104#ifdef BNX2X_STOP_ON_ERROR
105 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
106 BNX2X_ERR("BAD nbd!\n");
107 bnx2x_panic();
108 }
109#endif
110 new_cons = nbd + tx_buf->first_bd;
111
112 /* Get the next bd */
113 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
114
115 /* Skip a parse bd... */
116 --nbd;
117 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
118
119 /* ...and the TSO split header bd since they have no mapping */
120 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
121 --nbd;
122 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
123 }
124
125 /* now free frags */
126 while (nbd > 0) {
127
128 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
129 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
130 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
131 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
132 if (--nbd)
133 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
134 }
135
136 /* release skb */
137 WARN_ON(!skb);
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000138 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000139 tx_buf->first_bd = 0;
140 tx_buf->skb = NULL;
141
142 return new_cons;
143}
144
145int bnx2x_tx_int(struct bnx2x_fastpath *fp)
146{
147 struct bnx2x *bp = fp->bp;
148 struct netdev_queue *txq;
149 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
150
151#ifdef BNX2X_STOP_ON_ERROR
152 if (unlikely(bp->panic))
153 return -1;
154#endif
155
156 txq = netdev_get_tx_queue(bp->dev, fp->index);
157 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
158 sw_cons = fp->tx_pkt_cons;
159
160 while (sw_cons != hw_cons) {
161 u16 pkt_cons;
162
163 pkt_cons = TX_BD(sw_cons);
164
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000165 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
166 " pkt_cons %u\n",
167 fp->index, hw_cons, sw_cons, pkt_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000168
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000169 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
170 sw_cons++;
171 }
172
173 fp->tx_pkt_cons = sw_cons;
174 fp->tx_bd_cons = bd_cons;
175
176 /* Need to make the tx_bd_cons update visible to start_xmit()
177 * before checking for netif_tx_queue_stopped(). Without the
178 * memory barrier, there is a small possibility that
179 * start_xmit() will miss it and cause the queue to be stopped
180 * forever.
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300181 * On the other hand we need an rmb() here to ensure the proper
182 * ordering of bit testing in the following
183 * netif_tx_queue_stopped(txq) call.
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000184 */
185 smp_mb();
186
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000187 if (unlikely(netif_tx_queue_stopped(txq))) {
188 /* Taking tx_lock() is needed to prevent reenabling the queue
189 * while it's empty. This could have happened if rx_action() gets
190 * suspended in bnx2x_tx_int() after the condition before
191 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
192 *
193 * stops the queue->sees fresh tx_bd_cons->releases the queue->
194 * sends some packets consuming the whole queue again->
195 * stops the queue
196 */
197
198 __netif_tx_lock(txq, smp_processor_id());
199
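		/* MAX_SKB_FRAGS + 3 is assumed to cover a worst-case packet:
		 * a start BD, a parse BD, a possible TSO split header BD and
		 * one BD per fragment.
		 */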
200 if ((netif_tx_queue_stopped(txq)) &&
201 (bp->state == BNX2X_STATE_OPEN) &&
202 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
203 netif_tx_wake_queue(txq);
204
205 __netif_tx_unlock(txq);
206 }
207 return 0;
208}
209
210static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
211 u16 idx)
212{
213 u16 last_max = fp->last_max_sge;
214
215 if (SUB_S16(idx, last_max) > 0)
216 fp->last_max_sge = idx;
217}
218
219static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
220 struct eth_fast_path_rx_cqe *fp_cqe)
221{
222 struct bnx2x *bp = fp->bp;
223 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
224 le16_to_cpu(fp_cqe->len_on_bd)) >>
225 SGE_PAGE_SHIFT;
226 u16 last_max, last_elem, first_elem;
227 u16 delta = 0;
228 u16 i;
229
230 if (!sge_len)
231 return;
232
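	/* fp->sge_mask is a bit vector (64-bit elements, one bit per SGE).
	 * A cleared bit marks a page the FW has consumed; the SGE producer
	 * below only advances over fully-cleared 64-bit elements, which are
	 * then reset to all-ones for reuse.
	 */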
233 /* First mark all used pages */
234 for (i = 0; i < sge_len; i++)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300235 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000236 RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000237
238 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000239 sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000240
241 /* Here we assume that the last SGE index is the biggest */
242 prefetch((void *)(fp->sge_mask));
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000243 bnx2x_update_last_max_sge(fp,
244 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000245
246 last_max = RX_SGE(fp->last_max_sge);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300247 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
248 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000249
250 /* If ring is not full */
251 if (last_elem + 1 != first_elem)
252 last_elem++;
253
254 /* Now update the prod */
255 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
256 if (likely(fp->sge_mask[i]))
257 break;
258
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300259 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
260 delta += BIT_VEC64_ELEM_SZ;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000261 }
262
263 if (delta > 0) {
264 fp->rx_sge_prod += delta;
265 /* clear page-end entries */
266 bnx2x_clear_sge_mask_next_elems(fp);
267 }
268
269 DP(NETIF_MSG_RX_STATUS,
270 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
271 fp->last_max_sge, fp->rx_sge_prod);
272}
273
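/* bnx2x_tpa_start - begin a TPA aggregation on the given queue (bin).
 *
 * The empty skb kept in the per-queue aggregation pool is mapped and placed
 * on the producer BD, while the buffer that has just arrived (cons) is
 * parked in the pool until the matching TPA_STOP CQE closes the aggregation.
 */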
274static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300275 struct sk_buff *skb, u16 cons, u16 prod,
276 struct eth_fast_path_rx_cqe *cqe)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000277{
278 struct bnx2x *bp = fp->bp;
279 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
280 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
281 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
282 dma_addr_t mapping;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300283 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
284 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000285
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300286 /* print error if current state != stop */
287 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000288 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
289
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300290 /* Try to map an empty skb from the aggregation info */
291 mapping = dma_map_single(&bp->pdev->dev,
292 first_buf->skb->data,
293 fp->rx_buf_size, DMA_FROM_DEVICE);
294 /*
295 * ...if it fails - move the skb from the consumer to the producer
296 * and set the current aggregation state as ERROR to drop it
297 * when TPA_STOP arrives.
298 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000299
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300300 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
301 /* Move the BD from the consumer to the producer */
302 bnx2x_reuse_rx_skb(fp, cons, prod);
303 tpa_info->tpa_state = BNX2X_TPA_ERROR;
304 return;
305 }
306
307 /* move empty skb from pool to prod */
308 prod_rx_buf->skb = first_buf->skb;
309 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000310 /* point prod_bd to new skb */
311 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
312 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
313
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300314 /* move partial skb from cons to pool (don't unmap yet) */
315 *first_buf = *cons_rx_buf;
316
317 /* mark bin state as START */
318 tpa_info->parsing_flags =
319 le16_to_cpu(cqe->pars_flags.flags);
320 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
321 tpa_info->tpa_state = BNX2X_TPA_START;
322 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
323 tpa_info->placement_offset = cqe->placement_offset;
324
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000325#ifdef BNX2X_STOP_ON_ERROR
326 fp->tpa_queue_used |= (1 << queue);
327#ifdef _ASM_GENERIC_INT_L64_H
328 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
329#else
330 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
331#endif
332 fp->tpa_queue_used);
333#endif
334}
335
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000336/* Timestamp option length allowed for TPA aggregation:
337 *
338 * nop nop kind length echo val
339 */
340#define TPA_TSTAMP_OPT_LEN 12
341/**
Dmitry Kravkove8920672011-05-04 23:52:40 +0000342 * bnx2x_set_lro_mss - calculate the approximate value of the MSS
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000343 *
Dmitry Kravkove8920672011-05-04 23:52:40 +0000344 * @bp: driver handle
345 * @parsing_flags: parsing flags from the START CQE
346 * @len_on_bd: total length of the first packet for the
347 * aggregation.
348 *
349 * Approximate value of the MSS for this aggregation, calculated using
350 * its first packet.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000351 */
352static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
353 u16 len_on_bd)
354{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300355 /*
356 * A TPA aggregation won't have IP options, TCP options other
357 * than a timestamp, or IPv6 extension headers.
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000358 */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300359 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
360
361 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
362 PRS_FLAG_OVERETH_IPV6)
363 hdrs_len += sizeof(struct ipv6hdr);
364 else /* IPv4 */
365 hdrs_len += sizeof(struct iphdr);
Vladislav Zolotarove4e3c022011-02-28 03:37:10 +0000366
367
368 /* Check if there was a TCP timestamp; if there was, it will
369 * always be 12 bytes long: nop nop kind length echo val.
370 *
371 * Otherwise FW would close the aggregation.
372 */
373 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
374 hdrs_len += TPA_TSTAMP_OPT_LEN;
375
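	/* e.g. an IPv4 aggregation with TCP timestamps:
	 * hdrs_len = 14 (ETH) + 20 (IP) + 20 (TCP) + 12 (tstamp) = 66,
	 * so a 1514-byte first packet gives an MSS of roughly 1448.
	 */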
376 return len_on_bd - hdrs_len;
377}
378
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000379static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300380 u16 queue, struct sk_buff *skb,
381 struct eth_end_agg_rx_cqe *cqe,
382 u16 cqe_idx)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000383{
384 struct sw_rx_page *rx_pg, old_rx_pg;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000385 u32 i, frag_len, frag_size, pages;
386 int err;
387 int j;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300388 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
389 u16 len_on_bd = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000390
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300391 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000392 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
393
394 /* This is needed in order to enable forwarding support */
395 if (frag_size)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300396 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
397 tpa_info->parsing_flags, len_on_bd);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000398
399#ifdef BNX2X_STOP_ON_ERROR
400 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
401 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
402 pages, cqe_idx);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300403 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000404 bnx2x_panic();
405 return -EINVAL;
406 }
407#endif
408
409 /* Run through the SGL and compose the fragmented skb */
410 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300411 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000412
413 /* FW gives the indices of the SGE as if the ring is an array
414 (meaning that "next" element will consume 2 indices) */
415 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
416 rx_pg = &fp->rx_page_ring[sge_idx];
417 old_rx_pg = *rx_pg;
418
419 /* If we fail to allocate a substitute page, we simply stop
420 where we are and drop the whole packet */
421 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
422 if (unlikely(err)) {
423 fp->eth_q_stats.rx_skb_alloc_failed++;
424 return err;
425 }
426
427 /* Unmap the page as we are going to pass it to the stack */
428 dma_unmap_page(&bp->pdev->dev,
429 dma_unmap_addr(&old_rx_pg, mapping),
430 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
431
432 /* Add one frag and update the appropriate fields in the skb */
433 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
434
435 skb->data_len += frag_len;
436 skb->truesize += frag_len;
437 skb->len += frag_len;
438
439 frag_size -= frag_len;
440 }
441
442 return 0;
443}
444
445static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300446 u16 queue, struct eth_end_agg_rx_cqe *cqe,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000447 u16 cqe_idx)
448{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300449 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
450 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
451 u8 pad = tpa_info->placement_offset;
452 u16 len = tpa_info->len_on_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000453 struct sk_buff *skb = rx_buf->skb;
454 /* alloc new skb */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300455 struct sk_buff *new_skb;
456 u8 old_tpa_state = tpa_info->tpa_state;
457
458 tpa_info->tpa_state = BNX2X_TPA_STOP;
459
460 /* If there was an error during the handling of the TPA_START -
461 * drop this aggregation.
462 */
463 if (old_tpa_state == BNX2X_TPA_ERROR)
464 goto drop;
465
466 /* Try to allocate the new skb */
467 new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000468
469 /* Unmap skb in the pool anyway, as we are going to change
470 pool entry status to BNX2X_TPA_STOP even if new skb allocation
471 fails. */
472 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800473 fp->rx_buf_size, DMA_FROM_DEVICE);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000474
475 if (likely(new_skb)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000476 prefetch(skb);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000477 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000478
479#ifdef BNX2X_STOP_ON_ERROR
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800480 if (pad + len > fp->rx_buf_size) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000481 BNX2X_ERR("skb_put is about to fail... "
482 "pad %d len %d rx_buf_size %d\n",
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800483 pad, len, fp->rx_buf_size);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000484 bnx2x_panic();
485 return;
486 }
487#endif
488
489 skb_reserve(skb, pad);
490 skb_put(skb, len);
491
492 skb->protocol = eth_type_trans(skb, bp->dev);
493 skb->ip_summed = CHECKSUM_UNNECESSARY;
494
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300495 if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
496 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
497 __vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
Hao Zheng9bcc0892010-10-20 13:56:11 +0000498 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000499 } else {
500 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
501 " - dropping packet!\n");
Vladislav Zolotarov40955532011-05-22 10:06:58 +0000502 dev_kfree_skb_any(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000503 }
504
505
506 /* put new skb in bin */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300507 rx_buf->skb = new_skb;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000508
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300509 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000510 }
511
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300512drop:
513 /* drop the packet and keep the buffer in the bin */
514 DP(NETIF_MSG_RX_STATUS,
515 "Failed to allocate or map a new skb - dropping packet!\n");
516 fp->eth_q_stats.rx_skb_alloc_failed++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000517}
518
519/* Set Toeplitz hash value in the skb using the value from the
520 * CQE (calculated by HW).
521 */
522static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
523 struct sk_buff *skb)
524{
525 /* Set Toeplitz hash from CQE */
526 if ((bp->dev->features & NETIF_F_RXHASH) &&
527 (cqe->fast_path_cqe.status_flags &
528 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
529 skb->rxhash =
530 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
531}
532
533int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
534{
535 struct bnx2x *bp = fp->bp;
536 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
537 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
538 int rx_pkt = 0;
539
540#ifdef BNX2X_STOP_ON_ERROR
541 if (unlikely(bp->panic))
542 return 0;
543#endif
544
545 /* CQ "next element" is of the size of the regular element,
546 that's why it's ok here */
547 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
548 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
549 hw_comp_cons++;
550
551 bd_cons = fp->rx_bd_cons;
552 bd_prod = fp->rx_bd_prod;
553 bd_prod_fw = bd_prod;
554 sw_comp_cons = fp->rx_comp_cons;
555 sw_comp_prod = fp->rx_comp_prod;
556
557 /* Memory barrier necessary as speculative reads of the rx
558 * buffer can be ahead of the index in the status block
559 */
560 rmb();
561
562 DP(NETIF_MSG_RX_STATUS,
563 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
564 fp->index, hw_comp_cons, sw_comp_cons);
565
566 while (sw_comp_cons != hw_comp_cons) {
567 struct sw_rx_bd *rx_buf = NULL;
568 struct sk_buff *skb;
569 union eth_rx_cqe *cqe;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300570 struct eth_fast_path_rx_cqe *cqe_fp;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000571 u8 cqe_fp_flags;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300572 enum eth_rx_cqe_type cqe_fp_type;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000573 u16 len, pad;
574
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300575#ifdef BNX2X_STOP_ON_ERROR
576 if (unlikely(bp->panic))
577 return 0;
578#endif
579
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000580 comp_ring_cons = RCQ_BD(sw_comp_cons);
581 bd_prod = RX_BD(bd_prod);
582 bd_cons = RX_BD(bd_cons);
583
584 /* Prefetch the page containing the BD descriptor
585 at the producer's index. It will be needed when a new skb is
586 allocated */
587 prefetch((void *)(PAGE_ALIGN((unsigned long)
588 (&fp->rx_desc_ring[bd_prod])) -
589 PAGE_SIZE + 1));
590
591 cqe = &fp->rx_comp_ring[comp_ring_cons];
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300592 cqe_fp = &cqe->fast_path_cqe;
593 cqe_fp_flags = cqe_fp->type_error_flags;
594 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000595
596 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
597 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300598 cqe_fp_flags, cqe_fp->status_flags,
599 le32_to_cpu(cqe_fp->rss_hash_result),
600 le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000601
602 /* is this a slowpath msg? */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300603 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000604 bnx2x_sp_event(fp, cqe);
605 goto next_cqe;
606
607 /* this is an rx packet */
608 } else {
609 rx_buf = &fp->rx_buf_ring[bd_cons];
610 skb = rx_buf->skb;
611 prefetch(skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000612
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300613 if (!CQE_TYPE_FAST(cqe_fp_type)) {
614#ifdef BNX2X_STOP_ON_ERROR
615 /* sanity check */
616 if (fp->disable_tpa &&
617 (CQE_TYPE_START(cqe_fp_type) ||
618 CQE_TYPE_STOP(cqe_fp_type)))
619 BNX2X_ERR("START/STOP packet while "
620 "disable_tpa type %x\n",
621 CQE_TYPE(cqe_fp_type));
622#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000623
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300624 if (CQE_TYPE_START(cqe_fp_type)) {
625 u16 queue = cqe_fp->queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000626 DP(NETIF_MSG_RX_STATUS,
627 "calling tpa_start on queue %d\n",
628 queue);
629
630 bnx2x_tpa_start(fp, queue, skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300631 bd_cons, bd_prod,
632 cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000633
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300634 /* Set Toeplitz hash for LRO skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000635 bnx2x_set_skb_rxhash(bp, cqe, skb);
636
637 goto next_rx;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300638
639 } else {
640 u16 queue =
641 cqe->end_agg_cqe.queue_index;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000642 DP(NETIF_MSG_RX_STATUS,
643 "calling tpa_stop on queue %d\n",
644 queue);
645
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300646 bnx2x_tpa_stop(bp, fp, queue,
647 &cqe->end_agg_cqe,
648 comp_ring_cons);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000649#ifdef BNX2X_STOP_ON_ERROR
650 if (bp->panic)
651 return 0;
652#endif
653
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300654 bnx2x_update_sge_prod(fp, cqe_fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000655 goto next_cqe;
656 }
657 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300658 /* non TPA */
659 len = le16_to_cpu(cqe_fp->pkt_len);
660 pad = cqe_fp->placement_offset;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000661 dma_sync_single_for_device(&bp->pdev->dev,
662 dma_unmap_addr(rx_buf, mapping),
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300663 pad + RX_COPY_THRESH,
664 DMA_FROM_DEVICE);
Dmitry Kravkov217de5a2010-10-06 03:31:20 +0000665 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000666
667 /* is this an error packet? */
668 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
669 DP(NETIF_MSG_RX_ERR,
670 "ERROR flags %x rx packet %u\n",
671 cqe_fp_flags, sw_comp_cons);
672 fp->eth_q_stats.rx_err_discard_pkt++;
673 goto reuse_rx;
674 }
675
676 /* Since we don't have a jumbo ring
677 * copy small packets if mtu > 1500
678 */
679 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
680 (len <= RX_COPY_THRESH)) {
681 struct sk_buff *new_skb;
682
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300683 new_skb = netdev_alloc_skb(bp->dev, len + pad);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000684 if (new_skb == NULL) {
685 DP(NETIF_MSG_RX_ERR,
686 "ERROR packet dropped "
687 "because of alloc failure\n");
688 fp->eth_q_stats.rx_skb_alloc_failed++;
689 goto reuse_rx;
690 }
691
692 /* aligned copy */
693 skb_copy_from_linear_data_offset(skb, pad,
694 new_skb->data + pad, len);
695 skb_reserve(new_skb, pad);
696 skb_put(new_skb, len);
697
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000698 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000699
700 skb = new_skb;
701
702 } else
703 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
704 dma_unmap_single(&bp->pdev->dev,
705 dma_unmap_addr(rx_buf, mapping),
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800706 fp->rx_buf_size,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000707 DMA_FROM_DEVICE);
708 skb_reserve(skb, pad);
709 skb_put(skb, len);
710
711 } else {
712 DP(NETIF_MSG_RX_ERR,
713 "ERROR packet dropped because "
714 "of alloc failure\n");
715 fp->eth_q_stats.rx_skb_alloc_failed++;
716reuse_rx:
Dmitry Kravkov749a8502010-10-06 03:29:05 +0000717 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000718 goto next_rx;
719 }
720
721 skb->protocol = eth_type_trans(skb, bp->dev);
722
723 /* Set Toeplitz hash for a non-LRO skb */
724 bnx2x_set_skb_rxhash(bp, cqe, skb);
725
Eric Dumazetbc8acf22010-09-02 13:07:41 -0700726 skb_checksum_none_assert(skb);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +0000727
Michał Mirosław66371c42011-04-12 09:38:23 +0000728 if (bp->dev->features & NETIF_F_RXCSUM) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300729
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000730 if (likely(BNX2X_RX_CSUM_OK(cqe)))
731 skb->ip_summed = CHECKSUM_UNNECESSARY;
732 else
733 fp->eth_q_stats.hw_csum_err++;
734 }
735 }
736
737 skb_record_rx_queue(skb, fp->index);
738
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300739 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
740 PARSING_FLAGS_VLAN)
Hao Zheng9bcc0892010-10-20 13:56:11 +0000741 __vlan_hwaccel_put_tag(skb,
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300742 le16_to_cpu(cqe_fp->vlan_tag));
Hao Zheng9bcc0892010-10-20 13:56:11 +0000743 napi_gro_receive(&fp->napi, skb);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000744
745
746next_rx:
747 rx_buf->skb = NULL;
748
749 bd_cons = NEXT_RX_IDX(bd_cons);
750 bd_prod = NEXT_RX_IDX(bd_prod);
751 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
752 rx_pkt++;
753next_cqe:
754 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
755 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
756
757 if (rx_pkt == budget)
758 break;
759 } /* while */
760
761 fp->rx_bd_cons = bd_cons;
762 fp->rx_bd_prod = bd_prod_fw;
763 fp->rx_comp_cons = sw_comp_cons;
764 fp->rx_comp_prod = sw_comp_prod;
765
766 /* Update producers */
767 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
768 fp->rx_sge_prod);
769
770 fp->rx_pkt += rx_pkt;
771 fp->rx_calls++;
772
773 return rx_pkt;
774}
775
776static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
777{
778 struct bnx2x_fastpath *fp = fp_cookie;
779 struct bnx2x *bp = fp->bp;
780
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000781 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
782 "[fp %d fw_sd %d igusb %d]\n",
783 fp->index, fp->fw_sb_id, fp->igu_sb_id);
784 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000785
786#ifdef BNX2X_STOP_ON_ERROR
787 if (unlikely(bp->panic))
788 return IRQ_HANDLED;
789#endif
790
791 /* Handle Rx and Tx according to MSI-X vector */
792 prefetch(fp->rx_cons_sb);
793 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000794 prefetch(&fp->sb_running_index[SM_RX_ID]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000795 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
796
797 return IRQ_HANDLED;
798}
799
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000800/* HW Lock for shared dual port PHYs */
801void bnx2x_acquire_phy_lock(struct bnx2x *bp)
802{
803 mutex_lock(&bp->port.phy_mutex);
804
805 if (bp->port.need_hw_lock)
806 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
807}
808
809void bnx2x_release_phy_lock(struct bnx2x *bp)
810{
811 if (bp->port.need_hw_lock)
812 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
813
814 mutex_unlock(&bp->port.phy_mutex);
815}
816
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800817/* calculates MF speed according to current linespeed and MF configuration */
818u16 bnx2x_get_mf_speed(struct bnx2x *bp)
819{
820 u16 line_speed = bp->link_vars.line_speed;
821 if (IS_MF(bp)) {
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000822 u16 maxCfg = bnx2x_extract_max_cfg(bp,
823 bp->mf_config[BP_VN(bp)]);
824
825 /* Calculate the current MAX line speed limit for the MF
826 * devices
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800827 */
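		/* e.g. a 10000 Mbps link with maxCfg == 30 is reported as
		 * 3000 Mbps: in SI mode maxCfg acts as a percentage, in SD
		 * mode as a cap expressed in units of 100 Mbps.
		 */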
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000828 if (IS_MF_SI(bp))
829 line_speed = (line_speed * maxCfg) / 100;
830 else { /* SD mode */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800831 u16 vn_max_rate = maxCfg * 100;
832
833 if (vn_max_rate < line_speed)
834 line_speed = vn_max_rate;
Dmitry Kravkovfaa6fcb2011-02-28 03:37:20 +0000835 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -0800836 }
837
838 return line_speed;
839}
840
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000841/**
842 * bnx2x_fill_report_data - fill link report data to report
843 *
844 * @bp: driver handle
845 * @data: link state to update
846 *
847 * It uses non-atomic bit operations because it is called under the mutex.
848 */
849static inline void bnx2x_fill_report_data(struct bnx2x *bp,
850 struct bnx2x_link_report_data *data)
851{
852 u16 line_speed = bnx2x_get_mf_speed(bp);
853
854 memset(data, 0, sizeof(*data));
855
856 /* Fill the report data: effective line speed */
857 data->line_speed = line_speed;
858
859 /* Link is down */
860 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
861 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
862 &data->link_report_flags);
863
864 /* Full DUPLEX */
865 if (bp->link_vars.duplex == DUPLEX_FULL)
866 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
867
868 /* Rx Flow Control is ON */
869 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
870 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
871
872 /* Tx Flow Control is ON */
873 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
874 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
875}
876
877/**
878 * bnx2x_link_report - report link status to OS.
879 *
880 * @bp: driver handle
881 *
882 * Calls the __bnx2x_link_report() under the same locking scheme
883 * as a link/PHY state managing code to ensure a consistent link
884 * reporting.
885 */
886
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000887void bnx2x_link_report(struct bnx2x *bp)
888{
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000889 bnx2x_acquire_phy_lock(bp);
890 __bnx2x_link_report(bp);
891 bnx2x_release_phy_lock(bp);
892}
893
894/**
895 * __bnx2x_link_report - report link status to OS.
896 *
897 * @bp: driver handle
898 *
899 * Non-atomic implementation.
900 * Should be called under the phy_lock.
901 */
902void __bnx2x_link_report(struct bnx2x *bp)
903{
904 struct bnx2x_link_report_data cur_data;
905
906 /* reread mf_cfg */
907 if (!CHIP_IS_E1(bp))
908 bnx2x_read_mf_cfg(bp);
909
910 /* Read the current link report info */
911 bnx2x_fill_report_data(bp, &cur_data);
912
913 /* Don't report link down or exactly the same link status twice */
914 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
915 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
916 &bp->last_reported_link.link_report_flags) &&
917 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
918 &cur_data.link_report_flags)))
919 return;
920
921 bp->link_cnt++;
922
923 /* We are going to report new link parameters now -
924 * remember the current data for the next time.
925 */
926 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
927
928 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
929 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000930 netif_carrier_off(bp->dev);
931 netdev_err(bp->dev, "NIC Link is Down\n");
932 return;
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000933 } else {
934 netif_carrier_on(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000935 netdev_info(bp->dev, "NIC Link is Up, ");
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000936 pr_cont("%d Mbps ", cur_data.line_speed);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000937
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000938 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
939 &cur_data.link_report_flags))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000940 pr_cont("full duplex");
941 else
942 pr_cont("half duplex");
943
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000944 /* Handle the FC at the end so that only these flags would be
945 * possibly set. This way we may easily check if there is no FC
946 * enabled.
947 */
948 if (cur_data.link_report_flags) {
949 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
950 &cur_data.link_report_flags)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000951 pr_cont(", receive ");
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +0000952 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
953 &cur_data.link_report_flags))
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000954 pr_cont("& transmit ");
955 } else {
956 pr_cont(", transmit ");
957 }
958 pr_cont("flow control ON");
959 }
960 pr_cont("\n");
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000961 }
962}
963
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000964void bnx2x_init_rx_rings(struct bnx2x *bp)
965{
966 int func = BP_FUNC(bp);
967 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300968 ETH_MAX_AGGREGATION_QUEUES_E1H_E2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000969 u16 ring_prod;
970 int i, j;
971
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +0000972 /* Allocate TPA resources */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000973 for_each_rx_queue(bp, j) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000974 struct bnx2x_fastpath *fp = &bp->fp[j];
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000975
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -0800976 DP(NETIF_MSG_IFUP,
977 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
978
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000979 if (!fp->disable_tpa) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300980 /* Fill the per-aggregation pool */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000981 for (i = 0; i < max_agg_queues; i++) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300982 struct bnx2x_agg_info *tpa_info =
983 &fp->tpa_info[i];
984 struct sw_rx_bd *first_buf =
985 &tpa_info->first_buf;
986
987 first_buf->skb = netdev_alloc_skb(bp->dev,
988 fp->rx_buf_size);
989 if (!first_buf->skb) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +0000990 BNX2X_ERR("Failed to allocate TPA "
991 "skb pool for queue[%d] - "
992 "disabling TPA on this "
993 "queue!\n", j);
994 bnx2x_free_tpa_pool(bp, fp, i);
995 fp->disable_tpa = 1;
996 break;
997 }
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +0300998 dma_unmap_addr_set(first_buf, mapping, 0);
999 tpa_info->tpa_state = BNX2X_TPA_STOP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001000 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001001
1002 /* "next page" elements initialization */
1003 bnx2x_set_next_page_sgl(fp);
1004
1005 /* set SGEs bit mask */
1006 bnx2x_init_sge_ring_bit_mask(fp);
1007
1008 /* Allocate SGEs and initialize the ring elements */
1009 for (i = 0, ring_prod = 0;
1010 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1011
1012 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
1013 BNX2X_ERR("was only able to allocate "
1014 "%d rx sges\n", i);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001015 BNX2X_ERR("disabling TPA for "
1016 "queue[%d]\n", j);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001017 /* Cleanup already allocated elements */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001018 bnx2x_free_rx_sge_range(bp, fp,
1019 ring_prod);
1020 bnx2x_free_tpa_pool(bp, fp,
1021 max_agg_queues);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001022 fp->disable_tpa = 1;
1023 ring_prod = 0;
1024 break;
1025 }
1026 ring_prod = NEXT_SGE_IDX(ring_prod);
1027 }
1028
1029 fp->rx_sge_prod = ring_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001030 }
1031 }
1032
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001033 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001034 struct bnx2x_fastpath *fp = &bp->fp[j];
1035
1036 fp->rx_bd_cons = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001037
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001038 /* Activate BD ring */
1039 /* Warning!
1040 * this will generate an interrupt (to the TSTORM)
1041 * must only be done after chip is initialized
1042 */
1043 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1044 fp->rx_sge_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001045
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001046 if (j != 0)
1047 continue;
1048
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001049 if (CHIP_IS_E1(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001050 REG_WR(bp, BAR_USTRORM_INTMEM +
1051 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1052 U64_LO(fp->rx_comp_mapping));
1053 REG_WR(bp, BAR_USTRORM_INTMEM +
1054 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1055 U64_HI(fp->rx_comp_mapping));
1056 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001057 }
1058}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001059
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001060static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1061{
1062 int i;
1063
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001064 for_each_tx_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001065 struct bnx2x_fastpath *fp = &bp->fp[i];
1066
1067 u16 bd_cons = fp->tx_bd_cons;
1068 u16 sw_prod = fp->tx_pkt_prod;
1069 u16 sw_cons = fp->tx_pkt_cons;
1070
1071 while (sw_cons != sw_prod) {
1072 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1073 sw_cons++;
1074 }
1075 }
1076}
1077
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001078static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1079{
1080 struct bnx2x *bp = fp->bp;
1081 int i;
1082
1083 /* ring wasn't allocated */
1084 if (fp->rx_buf_ring == NULL)
1085 return;
1086
1087 for (i = 0; i < NUM_RX_BD; i++) {
1088 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1089 struct sk_buff *skb = rx_buf->skb;
1090
1091 if (skb == NULL)
1092 continue;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001093 dma_unmap_single(&bp->pdev->dev,
1094 dma_unmap_addr(rx_buf, mapping),
1095 fp->rx_buf_size, DMA_FROM_DEVICE);
1096
1097 rx_buf->skb = NULL;
1098 dev_kfree_skb(skb);
1099 }
1100}
1101
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001102static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1103{
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001104 int j;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001105
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001106 for_each_rx_queue(bp, j) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001107 struct bnx2x_fastpath *fp = &bp->fp[j];
1108
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001109 bnx2x_free_rx_bds(fp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001110
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001111 if (!fp->disable_tpa)
1112 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1113 ETH_MAX_AGGREGATION_QUEUES_E1 :
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001114 ETH_MAX_AGGREGATION_QUEUES_E1H_E2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001115 }
1116}
1117
1118void bnx2x_free_skbs(struct bnx2x *bp)
1119{
1120 bnx2x_free_tx_skbs(bp);
1121 bnx2x_free_rx_skbs(bp);
1122}
1123
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001124void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1125{
1126 /* load old values */
1127 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1128
1129 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1130 /* leave all but MAX value */
1131 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1132
1133 /* set new MAX value */
1134 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1135 & FUNC_MF_CFG_MAX_BW_MASK;
1136
1137 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1138 }
1139}
1140
Dmitry Kravkovca924292011-06-14 01:33:08 +00001141/**
1142 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1143 *
1144 * @bp: driver handle
1145 * @nvecs: number of vectors to be released
1146 */
1147static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001148{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001149 int i, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001150
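	/* Vectors were requested in this order: slowpath first, then the
	 * CNIC vector when BCM_CNIC is set, then one per ETH queue; stop
	 * as soon as 'nvecs' vectors have been accounted for.
	 */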
Dmitry Kravkovca924292011-06-14 01:33:08 +00001151 if (nvecs == offset)
1152 return;
1153 free_irq(bp->msix_table[offset].vector, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001154 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
Dmitry Kravkovca924292011-06-14 01:33:08 +00001155 bp->msix_table[offset].vector);
1156 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001157#ifdef BCM_CNIC
Dmitry Kravkovca924292011-06-14 01:33:08 +00001158 if (nvecs == offset)
1159 return;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001160 offset++;
1161#endif
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001162
Dmitry Kravkovca924292011-06-14 01:33:08 +00001163 for_each_eth_queue(bp, i) {
1164 if (nvecs == offset)
1165 return;
1166 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1167 "irq\n", i, bp->msix_table[offset].vector);
1168
1169 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001170 }
1171}
1172
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001173void bnx2x_free_irq(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001174{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001175 if (bp->flags & USING_MSIX_FLAG)
Dmitry Kravkovca924292011-06-14 01:33:08 +00001176 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1177 CNIC_CONTEXT_USE + 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001178 else if (bp->flags & USING_MSI_FLAG)
1179 free_irq(bp->pdev->irq, bp->dev);
1180 else
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001181 free_irq(bp->pdev->irq, bp->dev);
1182}
1183
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001184int bnx2x_enable_msix(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001185{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001186 int msix_vec = 0, i, rc, req_cnt;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001187
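	/* Build the MSI-X table in a fixed order: entry 0 for the slowpath,
	 * an optional CNIC entry, then one entry per ETH fastpath queue.
	 */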
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001188 bp->msix_table[msix_vec].entry = msix_vec;
1189 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1190 bp->msix_table[0].entry);
1191 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001192
1193#ifdef BCM_CNIC
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001194 bp->msix_table[msix_vec].entry = msix_vec;
1195 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1196 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1197 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001198#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001199 for_each_eth_queue(bp, i) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001200 bp->msix_table[msix_vec].entry = msix_vec;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001201 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001202 "(fastpath #%u)\n", msix_vec, msix_vec, i);
1203 msix_vec++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001204 }
1205
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001206 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001207
1208 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001209
1210 /*
1211 * reconfigure number of tx/rx queues according to available
1212 * MSI-X vectors
1213 */
1214 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001215 /* how many fewer vectors will we have? */
1216 int diff = req_cnt - rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001217
1218 DP(NETIF_MSG_IFUP,
1219 "Trying to use less MSI-X vectors: %d\n", rc);
1220
1221 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1222
1223 if (rc) {
1224 DP(NETIF_MSG_IFUP,
1225 "MSI-X is not attainable rc %d\n", rc);
1226 return rc;
1227 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001228 /*
1229 * decrease number of queues by number of unallocated entries
1230 */
1231 bp->num_queues -= diff;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001232
1233 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1234 bp->num_queues);
1235 } else if (rc) {
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001236 /* fall back to INTx if not enough memory */
1237 if (rc == -ENOMEM)
1238 bp->flags |= DISABLE_MSI_FLAG;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001239 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
1240 return rc;
1241 }
1242
1243 bp->flags |= USING_MSIX_FLAG;
1244
1245 return 0;
1246}
1247
1248static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1249{
Dmitry Kravkovca924292011-06-14 01:33:08 +00001250 int i, rc, offset = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001251
Dmitry Kravkovca924292011-06-14 01:33:08 +00001252 rc = request_irq(bp->msix_table[offset++].vector,
1253 bnx2x_msix_sp_int, 0,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001254 bp->dev->name, bp->dev);
1255 if (rc) {
1256 BNX2X_ERR("request sp irq failed\n");
1257 return -EBUSY;
1258 }
1259
1260#ifdef BCM_CNIC
1261 offset++;
1262#endif
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001263 for_each_eth_queue(bp, i) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001264 struct bnx2x_fastpath *fp = &bp->fp[i];
1265 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1266 bp->dev->name, i);
1267
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001268 rc = request_irq(bp->msix_table[offset].vector,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001269 bnx2x_msix_fp_int, 0, fp->name, fp);
1270 if (rc) {
Dmitry Kravkovca924292011-06-14 01:33:08 +00001271 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1272 bp->msix_table[offset].vector, rc);
1273 bnx2x_free_msix_irqs(bp, offset);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001274 return -EBUSY;
1275 }
1276
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001277 offset++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001278 }
1279
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001280 i = BNX2X_NUM_ETH_QUEUES(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001281 offset = 1 + CNIC_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001282 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1283 " ... fp[%d] %d\n",
1284 bp->msix_table[0].vector,
1285 0, bp->msix_table[offset].vector,
1286 i - 1, bp->msix_table[offset + i - 1].vector);
1287
1288 return 0;
1289}
1290
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001291int bnx2x_enable_msi(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001292{
1293 int rc;
1294
1295 rc = pci_enable_msi(bp->pdev);
1296 if (rc) {
1297 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1298 return -1;
1299 }
1300 bp->flags |= USING_MSI_FLAG;
1301
1302 return 0;
1303}
1304
1305static int bnx2x_req_irq(struct bnx2x *bp)
1306{
1307 unsigned long flags;
1308 int rc;
1309
1310 if (bp->flags & USING_MSI_FLAG)
1311 flags = 0;
1312 else
1313 flags = IRQF_SHARED;
1314
1315 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1316 bp->dev->name, bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001317 return rc;
1318}
1319
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001320static inline int bnx2x_setup_irqs(struct bnx2x *bp)
1321{
1322 int rc = 0;
1323 if (bp->flags & USING_MSIX_FLAG) {
1324 rc = bnx2x_req_msix_irqs(bp);
1325 if (rc)
1326 return rc;
1327 } else {
1328 bnx2x_ack_int(bp);
1329 rc = bnx2x_req_irq(bp);
1330 if (rc) {
1331 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1332 return rc;
1333 }
1334 if (bp->flags & USING_MSI_FLAG) {
1335 bp->dev->irq = bp->pdev->irq;
1336 netdev_info(bp->dev, "using MSI IRQ %d\n",
1337 bp->pdev->irq);
1338 }
1339 }
1340
1341 return 0;
1342}
1343
1344static inline void bnx2x_napi_enable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001345{
1346 int i;
1347
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001348 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001349 napi_enable(&bnx2x_fp(bp, i, napi));
1350}
1351
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001352static inline void bnx2x_napi_disable(struct bnx2x *bp)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001353{
1354 int i;
1355
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001356 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001357 napi_disable(&bnx2x_fp(bp, i, napi));
1358}
1359
1360void bnx2x_netif_start(struct bnx2x *bp)
1361{
Dmitry Kravkov4b7ed892011-06-14 01:32:53 +00001362 if (netif_running(bp->dev)) {
1363 bnx2x_napi_enable(bp);
1364 bnx2x_int_enable(bp);
1365 if (bp->state == BNX2X_STATE_OPEN)
1366 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001367 }
1368}
1369
1370void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1371{
1372 bnx2x_int_disable_sync(bp, disable_hw);
1373 bnx2x_napi_disable(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001374}
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001375
Vladislav Zolotarov8307fa32010-12-13 05:44:09 +00001376u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1377{
1378#ifdef BCM_CNIC
1379 struct bnx2x *bp = netdev_priv(dev);
1380 if (NO_FCOE(bp))
1381 return skb_tx_hash(dev, skb);
1382 else {
1383 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1384 u16 ether_type = ntohs(hdr->h_proto);
1385
1386 /* Skip VLAN tag if present */
1387 if (ether_type == ETH_P_8021Q) {
1388 struct vlan_ethhdr *vhdr =
1389 (struct vlan_ethhdr *)skb->data;
1390
1391 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1392 }
1393
1394 /* If ethertype is FCoE or FIP - use FCoE ring */
1395 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1396 return bnx2x_fcoe(bp, index);
1397 }
1398#endif
1399 /* Select a non-FCoE queue: if FCoE is enabled, exclude the FCoE L2 ring
1400 */
1401 return __skb_tx_hash(dev, skb,
1402 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1403}
1404
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001405void bnx2x_set_num_queues(struct bnx2x *bp)
1406{
1407 switch (bp->multi_mode) {
1408 case ETH_RSS_MODE_DISABLED:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001409 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001410 break;
1411 case ETH_RSS_MODE_REGULAR:
1412 bp->num_queues = bnx2x_calc_num_queues(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001413 break;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001414
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001415 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001416 bp->num_queues = 1;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001417 break;
1418 }
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001419
1420 /* Add special queues */
1421 bp->num_queues += NONE_ETH_CONTEXT_USE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001422}
1423
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001424static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1425{
1426 int rc, num = bp->num_queues;
1427
1428#ifdef BCM_CNIC
1429 if (NO_FCOE(bp))
1430 num -= FCOE_CONTEXT_USE;
1431
1432#endif
1433 netif_set_real_num_tx_queues(bp->dev, num);
1434 rc = netif_set_real_num_rx_queues(bp->dev, num);
1435 return rc;
1436}
1437
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001438static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1439{
1440 int i;
1441
1442 for_each_queue(bp, i) {
1443 struct bnx2x_fastpath *fp = &bp->fp[i];
1444
1445 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1446 if (IS_FCOE_IDX(i))
1447 /*
1448 * Although no IP frames are expected to arrive on
1449 * this ring, we still want to add an
1450 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1451 * overrun attack.
1452 */
1453 fp->rx_buf_size =
1454 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001455 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001456 else
1457 fp->rx_buf_size =
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001458 bp->dev->mtu + ETH_OVREHEAD +
1459 BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001460 }
1461}
1462
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001463static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
1464{
1465 int i;
1466 u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
1467 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
1468
1469 /*
1470 * Prepare the initial contents of the indirection table if RSS is
1471 * enabled
1472 */
1473 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1474 for (i = 0; i < sizeof(ind_table); i++)
1475 ind_table[i] =
1476 bp->fp->cl_id + (i % num_eth_queues);
1477 }
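	/* e.g. with 4 ETH queues the table simply repeats the pattern
	 * cl_id, cl_id + 1, cl_id + 2, cl_id + 3, ...
	 */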
1478
1479 /*
1480 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
1481 * per-port, so if explicit configuration is needed, do it only
1482 * for a PMF.
1483 *
1484 * For 57712 and newer on the other hand it's a per-function
1485 * configuration.
1486 */
1487 return bnx2x_config_rss_pf(bp, ind_table,
1488 bp->port.pmf || !CHIP_IS_E1x(bp));
1489}
1490
1491int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
1492{
1493 struct bnx2x_config_rss_params params = {0};
1494 int i;
1495
1496 /* Although RSS is meaningless when there is a single HW queue we
1497 * still need it enabled in order to have HW Rx hash generated.
1498 *
1499 * if (!is_eth_multi(bp))
1500 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
1501 */
1502
1503 params.rss_obj = &bp->rss_conf_obj;
1504
1505 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
1506
1507 /* RSS mode */
1508 switch (bp->multi_mode) {
1509 case ETH_RSS_MODE_DISABLED:
1510 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
1511 break;
1512 case ETH_RSS_MODE_REGULAR:
1513 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
1514 break;
1515 case ETH_RSS_MODE_VLAN_PRI:
1516 __set_bit(BNX2X_RSS_MODE_VLAN_PRI, &params.rss_flags);
1517 break;
1518 case ETH_RSS_MODE_E1HOV_PRI:
1519 __set_bit(BNX2X_RSS_MODE_E1HOV_PRI, &params.rss_flags);
1520 break;
1521 case ETH_RSS_MODE_IP_DSCP:
1522 __set_bit(BNX2X_RSS_MODE_IP_DSCP, &params.rss_flags);
1523 break;
1524 default:
1525 BNX2X_ERR("Unknown multi_mode: %d\n", bp->multi_mode);
1526 return -EINVAL;
1527 }
1528
1529 /* If RSS is enabled */
1530 if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
1531 /* RSS configuration */
1532 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
1533 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
1534 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
1535 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
1536
1537 /* Hash bits */
1538 params.rss_result_mask = MULTI_MASK;
1539
1540 memcpy(params.ind_table, ind_table, sizeof(params.ind_table));
1541
1542 if (config_hash) {
1543 /* RSS keys */
1544 for (i = 0; i < sizeof(params.rss_key) / 4; i++)
1545 params.rss_key[i] = random32();
1546
1547 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
1548 }
1549 }
1550
1551 return bnx2x_config_rss(bp, &params);
1552}
1553
1554static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
1555{
1556 struct bnx2x_func_state_params func_params = {0};
1557
1558 /* Prepare parameters for function state transitions */
1559 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
1560
1561 func_params.f_obj = &bp->func_obj;
1562 func_params.cmd = BNX2X_F_CMD_HW_INIT;
1563
1564 func_params.params.hw_init.load_phase = load_code;
1565
1566 return bnx2x_func_state_change(bp, &func_params);
1567}
1568
1569/*
 1570 * Cleans the objects that have internal lists without sending
 1571 * ramrods. Should be run when interrupts are disabled.
1572 */
1573static void bnx2x_squeeze_objects(struct bnx2x *bp)
1574{
1575 int rc;
1576 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
1577 struct bnx2x_mcast_ramrod_params rparam = {0};
1578 struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
1579
1580 /***************** Cleanup MACs' object first *************************/
1581
 1582 /* Wait for completion of the requested commands */
1583 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
1584 /* Perform a dry cleanup */
1585 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
1586
1587 /* Clean ETH primary MAC */
1588 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
1589 rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags,
1590 &ramrod_flags);
1591 if (rc != 0)
1592 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
1593
1594 /* Cleanup UC list */
1595 vlan_mac_flags = 0;
1596 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
1597 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
1598 &ramrod_flags);
1599 if (rc != 0)
1600 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
1601
1602 /***************** Now clean mcast object *****************************/
1603 rparam.mcast_obj = &bp->mcast_obj;
1604 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
1605
1606 /* Add a DEL command... */
1607 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
1608 if (rc < 0)
1609 BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
1610 "object: %d\n", rc);
1611
1612 /* ...and wait until all pending commands are cleared */
1613 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1614 while (rc != 0) {
1615 if (rc < 0) {
1616 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
1617 rc);
1618 return;
1619 }
1620
1621 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
1622 }
1623}
1624
1625#ifndef BNX2X_STOP_ON_ERROR
1626#define LOAD_ERROR_EXIT(bp, label) \
1627 do { \
1628 (bp)->state = BNX2X_STATE_ERROR; \
1629 goto label; \
1630 } while (0)
1631#else
1632#define LOAD_ERROR_EXIT(bp, label) \
1633 do { \
1634 (bp)->state = BNX2X_STATE_ERROR; \
1635 (bp)->panic = 1; \
1636 return -EBUSY; \
1637 } while (0)
1638#endif
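/*
 * Note: without BNX2X_STOP_ON_ERROR this macro jumps to one of the
 * load_errorN unwind labels at the bottom of bnx2x_nic_load(); with it,
 * the driver marks a panic and bails out immediately so the failing
 * state is preserved for debugging.
 */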
1639
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001640/* must be called with rtnl_lock */
1641int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1642{
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001643 int port = BP_PORT(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001644 u32 load_code;
1645 int i, rc;
1646
1647#ifdef BNX2X_STOP_ON_ERROR
1648 if (unlikely(bp->panic))
1649 return -EPERM;
1650#endif
1651
1652 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1653
Vladislav Zolotarov2ae17f62011-05-04 23:48:23 +00001654 /* Set the initial link reported state to link down */
1655 bnx2x_acquire_phy_lock(bp);
1656 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1657 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1658 &bp->last_reported_link.link_report_flags);
1659 bnx2x_release_phy_lock(bp);
1660
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001661 /* must be called before memory allocation and HW init */
1662 bnx2x_ilt_set_info(bp);
1663
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001664 /* zero fastpath structures preserving invariants like napi which are
1665 * allocated only once
1666 */
1667 for_each_queue(bp, i)
1668 bnx2x_bz_fp(bp, i);
1669
Vladislav Zolotarova8c94b92011-02-06 11:21:02 -08001670 /* Set the receive queues buffer size */
1671 bnx2x_set_rx_buf_size(bp);
1672
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001673 /*
1674 * set the tpa flag for each queue. The tpa flag determines the queue
1675 * minimal size so it must be set prior to queue memory allocation
1676 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001677 for_each_queue(bp, i)
1678 bnx2x_fp(bp, i, disable_tpa) =
1679 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1680
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001681#ifdef BCM_CNIC
1682 /* We don't want TPA on FCoE L2 ring */
1683 bnx2x_fcoe(bp, disable_tpa) = 1;
1684#endif
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001685
1686 if (bnx2x_alloc_mem(bp))
1687 return -ENOMEM;
1688
 1689 /* Since bnx2x_alloc_mem() may update
1690 * bp->num_queues, bnx2x_set_real_num_queues() should always
1691 * come after it.
1692 */
1693 rc = bnx2x_set_real_num_queues(bp);
1694 if (rc) {
1695 BNX2X_ERR("Unable to set real_num_queues\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001696 LOAD_ERROR_EXIT(bp, load_error0);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00001697 }
1698
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001699 bnx2x_napi_enable(bp);
1700
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001701 /* Send LOAD_REQUEST command to MCP
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001702 * Returns the type of LOAD command:
 1703 * if it is the first port to be initialized,
 1704 * common blocks should be initialized, otherwise not
1705 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001706 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001707 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001708 if (!load_code) {
1709 BNX2X_ERR("MCP response failure, aborting\n");
1710 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001711 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001712 }
1713 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1714 rc = -EBUSY; /* other port in diagnostic mode */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001715 LOAD_ERROR_EXIT(bp, load_error1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001716 }
1717
1718 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001719 int path = BP_PATH(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001720
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001721 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
1722 path, load_count[path][0], load_count[path][1],
1723 load_count[path][2]);
1724 load_count[path][0]++;
1725 load_count[path][1 + port]++;
1726 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
1727 path, load_count[path][0], load_count[path][1],
1728 load_count[path][2]);
1729 if (load_count[path][0] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001730 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001731 else if (load_count[path][1 + port] == 1)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001732 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1733 else
1734 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1735 }
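	/*
	 * Example of the no-MCP accounting above: the first function
	 * brought up on a path (load_count[path][0] == 1) gets
	 * LOAD_COMMON, the first function on the other port of that path
	 * gets LOAD_PORT, and any later function gets LOAD_FUNCTION.
	 */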
1736
1737 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001738 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001739 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1740 bp->port.pmf = 1;
1741 else
1742 bp->port.pmf = 0;
1743 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1744
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001745 /* Init Function state controlling object */
1746 bnx2x__init_func_obj(bp);
1747
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001748 /* Initialize HW */
1749 rc = bnx2x_init_hw(bp, load_code);
1750 if (rc) {
1751 BNX2X_ERR("HW init failed, aborting\n");
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001752 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001753 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001754 }
1755
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001756 /* Connect to IRQs */
1757 rc = bnx2x_setup_irqs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001758 if (rc) {
1759 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001760 LOAD_ERROR_EXIT(bp, load_error2);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001761 }
1762
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001763 /* Setup NIC internals and enable interrupts */
1764 bnx2x_nic_init(bp, load_code);
1765
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001766 /* Init per-function objects */
1767 bnx2x_init_bp_objs(bp);
1768
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001769 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1770 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001771 (bp->common.shmem2_base)) {
1772 if (SHMEM2_HAS(bp, dcc_support))
1773 SHMEM2_WR(bp, dcc_support,
1774 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1775 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1776 }
1777
1778 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1779 rc = bnx2x_func_start(bp);
1780 if (rc) {
1781 BNX2X_ERR("Function start failed!\n");
1782 LOAD_ERROR_EXIT(bp, load_error3);
1783 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001784
1785 /* Send LOAD_DONE command to MCP */
1786 if (!BP_NOMCP(bp)) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001787 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001788 if (!load_code) {
1789 BNX2X_ERR("MCP response failure, aborting\n");
1790 rc = -EBUSY;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001791 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001792 }
1793 }
1794
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001795 rc = bnx2x_setup_leading(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001796 if (rc) {
1797 BNX2X_ERR("Setup leading failed!\n");
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001798 LOAD_ERROR_EXIT(bp, load_error3);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001799 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001800
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001801#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001802 /* Enable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001803 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001804#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001805
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001806 for_each_nondefault_queue(bp, i) {
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001807 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001808 if (rc)
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001809 LOAD_ERROR_EXIT(bp, load_error4);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001810 }
1811
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001812 rc = bnx2x_init_rss_pf(bp);
1813 if (rc)
1814 LOAD_ERROR_EXIT(bp, load_error4);
1815
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001816 /* Now when Clients are configured we are ready to work */
1817 bp->state = BNX2X_STATE_OPEN;
1818
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001819 /* Configure a ucast MAC */
1820 rc = bnx2x_set_eth_mac(bp, true);
1821 if (rc)
1822 LOAD_ERROR_EXIT(bp, load_error4);
Vladislav Zolotarov6e30dd42011-02-06 11:25:41 -08001823
Dmitry Kravkove3835b92011-03-06 10:50:44 +00001824 if (bp->pending_max) {
1825 bnx2x_update_max_mf_config(bp, bp->pending_max);
1826 bp->pending_max = 0;
1827 }
1828
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001829 if (bp->port.pmf)
1830 bnx2x_initial_phy_init(bp, load_mode);
1831
1832 /* Start fast path */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001833
1834 /* Initialize Rx filter. */
1835 netif_addr_lock_bh(bp->dev);
1836 bnx2x_set_rx_mode(bp->dev);
1837 netif_addr_unlock_bh(bp->dev);
1838
1839 /* Start the Tx */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001840 switch (load_mode) {
1841 case LOAD_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001842 /* Tx queues should only be re-enabled */
1843 netif_tx_wake_all_queues(bp->dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001844 break;
1845
1846 case LOAD_OPEN:
1847 netif_tx_start_all_queues(bp->dev);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001848 smp_mb__after_clear_bit();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001849 break;
1850
1851 case LOAD_DIAG:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001852 bp->state = BNX2X_STATE_DIAG;
1853 break;
1854
1855 default:
1856 break;
1857 }
1858
1859 if (!bp->port.pmf)
1860 bnx2x__link_status_update(bp);
1861
1862 /* start the timer */
1863 mod_timer(&bp->timer, jiffies + bp->current_interval);
1864
1865#ifdef BCM_CNIC
1866 bnx2x_setup_cnic_irq_info(bp);
1867 if (bp->state == BNX2X_STATE_OPEN)
1868 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1869#endif
1870 bnx2x_inc_load_cnt(bp);
1871
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001872 /* Wait for all pending SP commands to complete */
1873 if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
1874 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
1875 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
1876 return -EBUSY;
1877 }
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00001878
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001879 bnx2x_dcbx_init(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001880 return 0;
1881
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001882#ifndef BNX2X_STOP_ON_ERROR
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001883load_error4:
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001884#ifdef BCM_CNIC
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001885 /* Disable Timer scan */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001886 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001887#endif
1888load_error3:
1889 bnx2x_int_disable_sync(bp, 1);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001890
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001891 /* Clean queueable objects */
1892 bnx2x_squeeze_objects(bp);
1893
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001894 /* Free SKBs, SGEs, TPA pool and driver internals */
1895 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001896 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001897 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001898
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001899 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001900 bnx2x_free_irq(bp);
1901load_error2:
1902 if (!BP_NOMCP(bp)) {
1903 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1904 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1905 }
1906
1907 bp->port.pmf = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001908load_error1:
1909 bnx2x_napi_disable(bp);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001910load_error0:
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001911 bnx2x_free_mem(bp);
1912
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001913 return rc;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001914#endif /* ! BNX2X_STOP_ON_ERROR */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001915}
1916
1917/* must be called with rtnl_lock */
1918int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1919{
1920 int i;
1921
1922 if (bp->state == BNX2X_STATE_CLOSED) {
1923 /* Interface has been removed - nothing to recover */
1924 bp->recovery_state = BNX2X_RECOVERY_DONE;
1925 bp->is_leader = 0;
1926 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1927 smp_wmb();
1928
1929 return -EINVAL;
1930 }
1931
1932#ifdef BCM_CNIC
1933 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1934#endif
1935 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001936 smp_mb();
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001937
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001938 bp->rx_mode = BNX2X_RX_MODE_NONE;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001939
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001940 /* Stop Tx */
1941 bnx2x_tx_disable(bp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001942
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001943 del_timer_sync(&bp->timer);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001944
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001945 /* Set ALWAYS_ALIVE bit in shmem */
1946 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
1947
1948 bnx2x_drv_pulse(bp);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001949
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001950 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001951
1952 /* Cleanup the chip if needed */
1953 if (unload_mode != UNLOAD_RECOVERY)
1954 bnx2x_chip_cleanup(bp, unload_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001955 else {
1956 /* Disable HW interrupts, NAPI and Tx */
1957 bnx2x_netif_stop(bp, 1);
1958
1959 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001960 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001961 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001962
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03001963 /*
 1964 * At this stage no more interrupts will arrive, so we may safely clean
1965 * the queueable objects here in case they failed to get cleaned so far.
1966 */
1967 bnx2x_squeeze_objects(bp);
1968
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001969 bp->port.pmf = 0;
1970
1971 /* Free SKBs, SGEs, TPA pool and driver internals */
1972 bnx2x_free_skbs(bp);
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00001973 for_each_rx_queue(bp, i)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001974 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00001975
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001976 bnx2x_free_mem(bp);
1977
1978 bp->state = BNX2X_STATE_CLOSED;
1979
 1980 /* The last driver must disable the "close the gate" functionality if
 1981 * there is no parity attention or "process kill" pending.
1982 */
1983 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1984 bnx2x_reset_is_done(bp))
1985 bnx2x_disable_close_the_gate(bp);
1986
 1987 /* Reset MCP mailbox sequence if there is an ongoing recovery */
1988 if (unload_mode == UNLOAD_RECOVERY)
1989 bp->fw_seq = 0;
1990
1991 return 0;
1992}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001993
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001994int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1995{
1996 u16 pmcsr;
1997
Dmitry Kravkovadf5f6a2010-10-17 23:10:02 +00001998 /* If there is no power capability, silently succeed */
1999 if (!bp->pm_cap) {
2000 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
2001 return 0;
2002 }
2003
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002004 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2005
2006 switch (state) {
2007 case PCI_D0:
2008 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2009 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2010 PCI_PM_CTRL_PME_STATUS));
2011
2012 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2013 /* delay required during transition out of D3hot */
2014 msleep(20);
2015 break;
2016
2017 case PCI_D3hot:
 2018 /* If there are other clients above, don't
2019 shut down the power */
2020 if (atomic_read(&bp->pdev->enable_cnt) != 1)
2021 return 0;
2022 /* Don't shut down the power for emulation and FPGA */
2023 if (CHIP_REV_IS_SLOW(bp))
2024 return 0;
2025
2026 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2027 pmcsr |= 3;
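		/* 3 is the D3hot encoding of the PM_CTRL state field */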
2028
2029 if (bp->wol)
2030 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2031
2032 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2033 pmcsr);
2034
2035 /* No more memory access after this point until
2036 * device is brought back to D0.
2037 */
2038 break;
2039
2040 default:
2041 return -EINVAL;
2042 }
2043 return 0;
2044}
2045
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002046/*
2047 * net_device service functions
2048 */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00002049int bnx2x_poll(struct napi_struct *napi, int budget)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002050{
2051 int work_done = 0;
2052 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
2053 napi);
2054 struct bnx2x *bp = fp->bp;
2055
2056 while (1) {
2057#ifdef BNX2X_STOP_ON_ERROR
2058 if (unlikely(bp->panic)) {
2059 napi_complete(napi);
2060 return 0;
2061 }
2062#endif
2063
2064 if (bnx2x_has_tx_work(fp))
2065 bnx2x_tx_int(fp);
2066
2067 if (bnx2x_has_rx_work(fp)) {
2068 work_done += bnx2x_rx_int(fp, budget - work_done);
2069
2070 /* must not complete if we consumed full budget */
2071 if (work_done >= budget)
2072 break;
2073 }
2074
2075 /* Fall out from the NAPI loop if needed */
2076 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002077#ifdef BCM_CNIC
2078 /* No need to update SB for FCoE L2 ring as long as
2079 * it's connected to the default SB and the SB
2080 * has been updated when NAPI was scheduled.
2081 */
2082 if (IS_FCOE_FP(fp)) {
2083 napi_complete(napi);
2084 break;
2085 }
2086#endif
2087
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002088 bnx2x_update_fpsb_idx(fp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002089 /* bnx2x_has_rx_work() reads the status block,
2090 * thus we need to ensure that status block indices
2091 * have been actually read (bnx2x_update_fpsb_idx)
2092 * prior to this check (bnx2x_has_rx_work) so that
2093 * we won't write the "newer" value of the status block
2094 * to IGU (if there was a DMA right after
2095 * bnx2x_has_rx_work and if there is no rmb, the memory
2096 * reading (bnx2x_update_fpsb_idx) may be postponed
2097 * to right before bnx2x_ack_sb). In this case there
2098 * will never be another interrupt until there is
2099 * another update of the status block, while there
2100 * is still unhandled work.
2101 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002102 rmb();
2103
2104 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
2105 napi_complete(napi);
2106 /* Re-enable interrupts */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002107 DP(NETIF_MSG_HW,
2108 "Update index to %d\n", fp->fp_hc_idx);
2109 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
2110 le16_to_cpu(fp->fp_hc_idx),
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002111 IGU_INT_ENABLE, 1);
2112 break;
2113 }
2114 }
2115 }
2116
2117 return work_done;
2118}
2119
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002120/* we split the first BD into headers and data BDs
2121 * to ease the pain of our fellow microcode engineers
2122 * we use one mapping for both BDs
2123 * So far this has only been observed to happen
2124 * in Other Operating Systems(TM)
2125 */
2126static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
2127 struct bnx2x_fastpath *fp,
2128 struct sw_tx_bd *tx_buf,
2129 struct eth_tx_start_bd **tx_bd, u16 hlen,
2130 u16 bd_prod, int nbd)
2131{
2132 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
2133 struct eth_tx_bd *d_tx_bd;
2134 dma_addr_t mapping;
2135 int old_len = le16_to_cpu(h_tx_bd->nbytes);
2136
2137 /* first fix first BD */
2138 h_tx_bd->nbd = cpu_to_le16(nbd);
2139 h_tx_bd->nbytes = cpu_to_le16(hlen);
2140
2141 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
2142 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
2143 h_tx_bd->addr_lo, h_tx_bd->nbd);
2144
2145 /* now get a new data BD
2146 * (after the pbd) and fill it */
2147 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2148 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2149
2150 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
2151 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
2152
2153 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2154 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2155 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
2156
2157 /* this marks the BD as one that has no individual mapping */
2158 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
2159
2160 DP(NETIF_MSG_TX_QUEUED,
2161 "TSO split data size is %d (%x:%x)\n",
2162 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
2163
2164 /* update tx_bd */
2165 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
2166
2167 return bd_prod;
2168}
2169
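/*
 * Helper note: bnx2x_csum_fix() below compensates for a 'fix'-byte
 * offset between the area covered by the software checksum and the
 * transport header by adding or subtracting the partial sum over those
 * bytes; the result is returned byte-swapped, as the parsing BD
 * expects it.
 */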
2170static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
2171{
2172 if (fix > 0)
2173 csum = (u16) ~csum_fold(csum_sub(csum,
2174 csum_partial(t_header - fix, fix, 0)));
2175
2176 else if (fix < 0)
2177 csum = (u16) ~csum_fold(csum_add(csum,
2178 csum_partial(t_header, -fix, 0)));
2179
2180 return swab16(csum);
2181}
2182
2183static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
2184{
2185 u32 rc;
2186
2187 if (skb->ip_summed != CHECKSUM_PARTIAL)
2188 rc = XMIT_PLAIN;
2189
2190 else {
Hao Zhengd0d9d8e2010-11-11 13:47:58 +00002191 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002192 rc = XMIT_CSUM_V6;
2193 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2194 rc |= XMIT_CSUM_TCP;
2195
2196 } else {
2197 rc = XMIT_CSUM_V4;
2198 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2199 rc |= XMIT_CSUM_TCP;
2200 }
2201 }
2202
Vladislav Zolotarov5892b9e2010-11-28 00:23:35 +00002203 if (skb_is_gso_v6(skb))
2204 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
2205 else if (skb_is_gso(skb))
2206 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002207
2208 return rc;
2209}
2210
2211#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2212/* check if packet requires linearization (packet is too fragmented);
 2213 no need to check fragmentation if page size > 8K (there will be no
 2214 violation of FW restrictions) */
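/*
 * Roughly: the FW can only fetch MAX_FETCH_BD BDs at a time, so for
 * LSO the code below slides a window of (MAX_FETCH_BD - 3) fragments
 * over the skb and requests linearization whenever a window sums to
 * less than one MSS, since a single segment would then span too many
 * BDs.
 */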
2215static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2216 u32 xmit_type)
2217{
2218 int to_copy = 0;
2219 int hlen = 0;
2220 int first_bd_sz = 0;
2221
2222 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2223 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2224
2225 if (xmit_type & XMIT_GSO) {
2226 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2227 /* Check if LSO packet needs to be copied:
2228 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2229 int wnd_size = MAX_FETCH_BD - 3;
2230 /* Number of windows to check */
2231 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2232 int wnd_idx = 0;
2233 int frag_idx = 0;
2234 u32 wnd_sum = 0;
2235
2236 /* Headers length */
2237 hlen = (int)(skb_transport_header(skb) - skb->data) +
2238 tcp_hdrlen(skb);
2239
2240 /* Amount of data (w/o headers) on linear part of SKB*/
2241 first_bd_sz = skb_headlen(skb) - hlen;
2242
2243 wnd_sum = first_bd_sz;
2244
2245 /* Calculate the first sum - it's special */
2246 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2247 wnd_sum +=
2248 skb_shinfo(skb)->frags[frag_idx].size;
2249
2250 /* If there was data on linear skb data - check it */
2251 if (first_bd_sz > 0) {
2252 if (unlikely(wnd_sum < lso_mss)) {
2253 to_copy = 1;
2254 goto exit_lbl;
2255 }
2256
2257 wnd_sum -= first_bd_sz;
2258 }
2259
2260 /* Others are easier: run through the frag list and
2261 check all windows */
2262 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2263 wnd_sum +=
2264 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2265
2266 if (unlikely(wnd_sum < lso_mss)) {
2267 to_copy = 1;
2268 break;
2269 }
2270 wnd_sum -=
2271 skb_shinfo(skb)->frags[wnd_idx].size;
2272 }
2273 } else {
 2274 /* in the non-LSO case, a too-fragmented packet should
 2275 always be linearized */
2276 to_copy = 1;
2277 }
2278 }
2279
2280exit_lbl:
2281 if (unlikely(to_copy))
2282 DP(NETIF_MSG_TX_QUEUED,
2283 "Linearization IS REQUIRED for %s packet. "
2284 "num_frags %d hlen %d first_bd_sz %d\n",
2285 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2286 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2287
2288 return to_copy;
2289}
2290#endif
2291
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002292static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2293 u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002294{
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002295 *parsing_data |= (skb_shinfo(skb)->gso_size <<
2296 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2297 ETH_TX_PARSE_BD_E2_LSO_MSS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002298 if ((xmit_type & XMIT_GSO_V6) &&
2299 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002300 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002301}
2302
2303/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002304 * bnx2x_set_pbd_gso - update PBD in GSO case.
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002305 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002306 * @skb: packet skb
2307 * @pbd: parse BD
2308 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002309 */
2310static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2311 struct eth_tx_parse_bd_e1x *pbd,
2312 u32 xmit_type)
2313{
2314 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2315 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2316 pbd->tcp_flags = pbd_tcp_flags(skb);
2317
2318 if (xmit_type & XMIT_GSO_V4) {
2319 pbd->ip_id = swab16(ip_hdr(skb)->id);
2320 pbd->tcp_pseudo_csum =
2321 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2322 ip_hdr(skb)->daddr,
2323 0, IPPROTO_TCP, 0));
2324
2325 } else
2326 pbd->tcp_pseudo_csum =
2327 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2328 &ipv6_hdr(skb)->daddr,
2329 0, IPPROTO_TCP, 0));
2330
2331 pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2332}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002333
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002334/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002335 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002336 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002337 * @bp: driver handle
2338 * @skb: packet skb
2339 * @parsing_data: data to be updated
2340 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002341 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002342 * 57712 related
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002343 */
2344static inline u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002345 u32 *parsing_data, u32 xmit_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002346{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002347 *parsing_data |=
2348 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2349 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2350 ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002351
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002352 if (xmit_type & XMIT_CSUM_TCP) {
2353 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2354 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2355 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002356
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002357 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2358 } else
2359 /* We support checksum offload for TCP and UDP only.
2360 * No need to pass the UDP header length - it's a constant.
2361 */
2362 return skb_transport_header(skb) +
2363 sizeof(struct udphdr) - skb->data;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002364}
2365
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002366static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2367 struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2368{
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002369 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2370
2371 if (xmit_type & XMIT_CSUM_V4)
2372 tx_start_bd->bd_flags.as_bitfield |=
2373 ETH_TX_BD_FLAGS_IP_CSUM;
2374 else
2375 tx_start_bd->bd_flags.as_bitfield |=
2376 ETH_TX_BD_FLAGS_IPV6;
2377
2378 if (!(xmit_type & XMIT_CSUM_TCP))
2379 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002380}
2381
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002382/**
Dmitry Kravkove8920672011-05-04 23:52:40 +00002383 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002384 *
Dmitry Kravkove8920672011-05-04 23:52:40 +00002385 * @bp: driver handle
2386 * @skb: packet skb
2387 * @pbd: parse BD to be updated
2388 * @xmit_type: xmit flags
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002389 */
2390static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2391 struct eth_tx_parse_bd_e1x *pbd,
2392 u32 xmit_type)
2393{
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002394 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002395
2396 /* for now NS flag is not used in Linux */
2397 pbd->global_data =
2398 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2399 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2400
2401 pbd->ip_hlen_w = (skb_transport_header(skb) -
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002402 skb_network_header(skb)) >> 1;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002403
Vladislav Zolotarove39aece2011-04-23 07:44:46 +00002404 hlen += pbd->ip_hlen_w;
2405
2406 /* We support checksum offload for TCP and UDP only */
2407 if (xmit_type & XMIT_CSUM_TCP)
2408 hlen += tcp_hdrlen(skb) / 2;
2409 else
2410 hlen += sizeof(struct udphdr) / 2;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002411
2412 pbd->total_hlen_w = cpu_to_le16(hlen);
2413 hlen = hlen*2;
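	/* total_hlen_w above is in 16-bit words; convert hlen back to
	 * bytes for the caller
	 */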
2414
2415 if (xmit_type & XMIT_CSUM_TCP) {
2416 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2417
2418 } else {
2419 s8 fix = SKB_CS_OFF(skb); /* signed! */
2420
2421 DP(NETIF_MSG_TX_QUEUED,
2422 "hlen %d fix %d csum before fix %x\n",
2423 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2424
2425 /* HW bug: fixup the CSUM */
2426 pbd->tcp_pseudo_csum =
2427 bnx2x_csum_fix(skb_transport_header(skb),
2428 SKB_CS(skb), fix);
2429
2430 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2431 pbd->tcp_pseudo_csum);
2432 }
2433
2434 return hlen;
2435}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002436
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002437/* called with netif_tx_lock
2438 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2439 * netif_wake_queue()
2440 */
2441netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2442{
2443 struct bnx2x *bp = netdev_priv(dev);
2444 struct bnx2x_fastpath *fp;
2445 struct netdev_queue *txq;
2446 struct sw_tx_bd *tx_buf;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002447 struct eth_tx_start_bd *tx_start_bd, *first_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002448 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002449 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002450 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002451 u32 pbd_e2_parsing_data = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002452 u16 pkt_prod, bd_prod;
2453 int nbd, fp_index;
2454 dma_addr_t mapping;
2455 u32 xmit_type = bnx2x_xmit_type(bp, skb);
2456 int i;
2457 u8 hlen = 0;
2458 __le16 pkt_size = 0;
2459 struct ethhdr *eth;
2460 u8 mac_type = UNICAST_ADDRESS;
2461
2462#ifdef BNX2X_STOP_ON_ERROR
2463 if (unlikely(bp->panic))
2464 return NETDEV_TX_BUSY;
2465#endif
2466
2467 fp_index = skb_get_queue_mapping(skb);
2468 txq = netdev_get_tx_queue(dev, fp_index);
2469
2470 fp = &bp->fp[fp_index];
2471
2472 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2473 fp->eth_q_stats.driver_xoff++;
2474 netif_tx_stop_queue(txq);
2475 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2476 return NETDEV_TX_BUSY;
2477 }
2478
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002479 DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
2480 "protocol(%x,%x) gso type %x xmit_type %x\n",
2481 fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002482 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2483
2484 eth = (struct ethhdr *)skb->data;
2485
2486 /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2487 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2488 if (is_broadcast_ether_addr(eth->h_dest))
2489 mac_type = BROADCAST_ADDRESS;
2490 else
2491 mac_type = MULTICAST_ADDRESS;
2492 }
2493
2494#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2495 /* First, check if we need to linearize the skb (due to FW
2496 restrictions). No need to check fragmentation if page size > 8K
 2497 (there will be no violation of FW restrictions) */
2498 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2499 /* Statistics of linearization */
2500 bp->lin_cnt++;
2501 if (skb_linearize(skb) != 0) {
2502 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2503 "silently dropping this SKB\n");
2504 dev_kfree_skb_any(skb);
2505 return NETDEV_TX_OK;
2506 }
2507 }
2508#endif
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002509 /* Map skb linear data for DMA */
2510 mapping = dma_map_single(&bp->pdev->dev, skb->data,
2511 skb_headlen(skb), DMA_TO_DEVICE);
2512 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2513 DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
2514 "silently dropping this SKB\n");
2515 dev_kfree_skb_any(skb);
2516 return NETDEV_TX_OK;
2517 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002518 /*
2519 Please read carefully. First we use one BD which we mark as start,
2520 then we have a parsing info BD (used for TSO or xsum),
2521 and only then we have the rest of the TSO BDs.
2522 (don't forget to mark the last one as last,
2523 and to unmap only AFTER you write to the BD ...)
 2524 And above all, all PBD sizes are in words - NOT DWORDS!
2525 */
2526
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002527 /* get current pkt produced now - advance it just before sending packet
2528 * since mapping of pages may fail and cause packet to be dropped
2529 */
2530 pkt_prod = fp->tx_pkt_prod;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002531 bd_prod = TX_BD(fp->tx_bd_prod);
2532
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002533 /* get a tx_buf and first BD
2534 * tx_start_bd may be changed during SPLIT,
2535 * but first_bd will always stay first
2536 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002537 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2538 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002539 first_bd = tx_start_bd;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002540
2541 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002542 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2543 mac_type);
2544
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002545 /* header nbd */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002546 SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002547
2548 /* remember the first BD of the packet */
2549 tx_buf->first_bd = fp->tx_bd_prod;
2550 tx_buf->skb = skb;
2551 tx_buf->flags = 0;
2552
2553 DP(NETIF_MSG_TX_QUEUED,
2554 "sending pkt %u @%p next_idx %u bd %u @%p\n",
2555 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2556
Jesse Grosseab6d182010-10-20 13:56:03 +00002557 if (vlan_tx_tag_present(skb)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002558 tx_start_bd->vlan_or_ethertype =
2559 cpu_to_le16(vlan_tx_tag_get(skb));
2560 tx_start_bd->bd_flags.as_bitfield |=
2561 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002562 } else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002563 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002564
2565 /* turn on parsing and get a BD */
2566 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002567
Dmitry Kravkov93ef5c02011-06-14 01:33:02 +00002568 if (xmit_type & XMIT_CSUM)
2569 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002570
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002571 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002572 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2573 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2574 /* Set PBD in checksum offload case */
2575 if (xmit_type & XMIT_CSUM)
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002576 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2577 &pbd_e2_parsing_data,
2578 xmit_type);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002579 if (IS_MF_SI(bp)) {
2580 /*
2581 * fill in the MAC addresses in the PBD - for local
2582 * switching
2583 */
2584 bnx2x_set_fw_mac_addr(&pbd_e2->src_mac_addr_hi,
2585 &pbd_e2->src_mac_addr_mid,
2586 &pbd_e2->src_mac_addr_lo,
2587 eth->h_source);
2588 bnx2x_set_fw_mac_addr(&pbd_e2->dst_mac_addr_hi,
2589 &pbd_e2->dst_mac_addr_mid,
2590 &pbd_e2->dst_mac_addr_lo,
2591 eth->h_dest);
2592 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002593 } else {
2594 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2595 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2596 /* Set PBD in checksum offload case */
2597 if (xmit_type & XMIT_CSUM)
2598 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002599
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002600 }
2601
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002602 /* Setup the data pointer of the first BD of the packet */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002603 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2604 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002605 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002606 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2607 pkt_size = tx_start_bd->nbytes;
2608
2609 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
2610 " nbytes %d flags %x vlan %x\n",
2611 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2612 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002613 tx_start_bd->bd_flags.as_bitfield,
2614 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002615
2616 if (xmit_type & XMIT_GSO) {
2617
2618 DP(NETIF_MSG_TX_QUEUED,
2619 "TSO packet len %d hlen %d total len %d tso size %d\n",
2620 skb->len, hlen, skb_headlen(skb),
2621 skb_shinfo(skb)->gso_size);
2622
2623 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2624
2625 if (unlikely(skb_headlen(skb) > hlen))
2626 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2627 hlen, bd_prod, ++nbd);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002628 if (!CHIP_IS_E1x(bp))
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002629 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2630 xmit_type);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002631 else
2632 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002633 }
Vladislav Zolotarov2297a2d2010-12-08 01:43:09 +00002634
2635 /* Set the PBD's parsing_data field if not zero
2636 * (for the chips newer than 57711).
2637 */
2638 if (pbd_e2_parsing_data)
2639 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2640
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002641 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2642
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002643 /* Handle fragmented skb */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002644 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2645 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2646
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002647 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2648 frag->page_offset, frag->size,
2649 DMA_TO_DEVICE);
2650 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
2651
2652 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
2653 "dropping packet...\n");
2654
 2655 /* we need to unmap all buffers already mapped
 2656 * for this SKB;
 2657 * first_bd->nbd needs to be properly updated
 2658 * before the call to bnx2x_free_tx_pkt
2659 */
2660 first_bd->nbd = cpu_to_le16(nbd);
2661 bnx2x_free_tx_pkt(bp, fp, TX_BD(fp->tx_pkt_prod));
2662 return NETDEV_TX_OK;
2663 }
2664
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002665 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2666 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2667 if (total_pkt_bd == NULL)
2668 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2669
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002670 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2671 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2672 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2673 le16_add_cpu(&pkt_size, frag->size);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002674 nbd++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002675
2676 DP(NETIF_MSG_TX_QUEUED,
2677 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
2678 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2679 le16_to_cpu(tx_data_bd->nbytes));
2680 }
2681
2682 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2683
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002684 /* update with actual num BDs */
2685 first_bd->nbd = cpu_to_le16(nbd);
2686
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002687 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2688
2689 /* now send a tx doorbell, counting the next BD
2690 * if the packet contains or ends with it
2691 */
2692 if (TX_BD_POFF(bd_prod) < nbd)
2693 nbd++;
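	/* (the "next BD" is the next-page pointer BD at the end of a BD
	 * page: if the packet's BDs crossed or ended on a page boundary,
	 * that BD is consumed as well and must be counted)
	 */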
2694
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002695 /* total_pkt_bytes should be set on the first data BD if
2696 * it's not an LSO packet and there is more than one
2697 * data BD. In this case pkt_size is limited by an MTU value.
2698 * However we prefer to set it for an LSO packet (while we don't
 2699 * have to) in order to save some CPU cycles in the non-LSO
 2700 * case, where we care much more about them.
2701 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002702 if (total_pkt_bd != NULL)
2703 total_pkt_bd->total_pkt_bytes = pkt_size;
2704
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002705 if (pbd_e1x)
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002706 DP(NETIF_MSG_TX_QUEUED,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002707 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002708 " tcp_flags %x xsum %x seq %u hlen %u\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002709 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2710 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2711 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2712 le16_to_cpu(pbd_e1x->total_hlen_w));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002713 if (pbd_e2)
2714 DP(NETIF_MSG_TX_QUEUED,
2715 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
2716 pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2717 pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2718 pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2719 pbd_e2->parsing_data);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002720 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
2721
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002722 fp->tx_pkt_prod++;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002723 /*
2724 * Make sure that the BD data is updated before updating the producer
2725 * since FW might read the BD right after the producer is updated.
2726 * This is only applicable for weak-ordered memory model archs such
 2727 * as IA-64. The following barrier is also mandatory since the FW
 2728 * assumes packets must have BDs.
2729 */
2730 wmb();
2731
2732 fp->tx_db.data.prod += nbd;
2733 barrier();
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002734
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002735 DOORBELL(bp, fp->cid, fp->tx_db.raw);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002736
2737 mmiowb();
2738
2739 fp->tx_bd_prod += nbd;
2740
2741 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2742 netif_tx_stop_queue(txq);
2743
2744 /* paired memory barrier is in bnx2x_tx_int(), we have to keep
2745 * ordering of set_bit() in netif_tx_stop_queue() and read of
2746 * fp->bd_tx_cons */
2747 smp_mb();
2748
2749 fp->eth_q_stats.driver_xoff++;
2750 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2751 netif_tx_wake_queue(txq);
2752 }
2753 fp->tx_pkt++;
2754
2755 return NETDEV_TX_OK;
2756}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002757
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002758/* called with rtnl_lock */
2759int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2760{
2761 struct sockaddr *addr = p;
2762 struct bnx2x *bp = netdev_priv(dev);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002763 int rc = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002764
2765 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2766 return -EINVAL;
2767
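	/* If the interface is running, the MAC is reconfigured in two
	 * steps: bnx2x_set_eth_mac(bp, false) removes the old address,
	 * then the new dev_addr is written and bnx2x_set_eth_mac(bp, true)
	 * programs it.
	 */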
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002768 if (netif_running(dev)) {
2769 rc = bnx2x_set_eth_mac(bp, false);
2770 if (rc)
2771 return rc;
2772 }
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002773
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002774 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2775
2776 if (netif_running(dev))
2777 rc = bnx2x_set_eth_mac(bp, true);
2778
2779 return rc;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002780}
2781
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002782static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2783{
2784 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2785 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2786
2787 /* Common */
2788#ifdef BCM_CNIC
2789 if (IS_FCOE_IDX(fp_index)) {
2790 memset(sb, 0, sizeof(union host_hc_status_block));
2791 fp->status_blk_mapping = 0;
2792
2793 } else {
2794#endif
2795 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002796 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002797 BNX2X_PCI_FREE(sb->e2_sb,
2798 bnx2x_fp(bp, fp_index,
2799 status_blk_mapping),
2800 sizeof(struct host_hc_status_block_e2));
2801 else
2802 BNX2X_PCI_FREE(sb->e1x_sb,
2803 bnx2x_fp(bp, fp_index,
2804 status_blk_mapping),
2805 sizeof(struct host_hc_status_block_e1x));
2806#ifdef BCM_CNIC
2807 }
2808#endif
2809 /* Rx */
2810 if (!skip_rx_queue(bp, fp_index)) {
2811 bnx2x_free_rx_bds(fp);
2812
2813 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2814 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2815 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2816 bnx2x_fp(bp, fp_index, rx_desc_mapping),
2817 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2818
2819 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2820 bnx2x_fp(bp, fp_index, rx_comp_mapping),
2821 sizeof(struct eth_fast_path_rx_cqe) *
2822 NUM_RCQ_BD);
2823
2824 /* SGE ring */
2825 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2826 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2827 bnx2x_fp(bp, fp_index, rx_sge_mapping),
2828 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2829 }
2830
2831 /* Tx */
2832 if (!skip_tx_queue(bp, fp_index)) {
2833 /* fastpath tx rings: tx_buf tx_desc */
2834 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2835 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2836 bnx2x_fp(bp, fp_index, tx_desc_mapping),
2837 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2838 }
2839 /* end of fastpath */
2840}
2841
2842void bnx2x_free_fp_mem(struct bnx2x *bp)
2843{
2844 int i;
2845 for_each_queue(bp, i)
2846 bnx2x_free_fp_mem_at(bp, i);
2847}
2848
2849static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2850{
2851 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002852 if (!CHIP_IS_E1x(bp)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002853 bnx2x_fp(bp, index, sb_index_values) =
2854 (__le16 *)status_blk.e2_sb->sb.index_values;
2855 bnx2x_fp(bp, index, sb_running_index) =
2856 (__le16 *)status_blk.e2_sb->sb.running_index;
2857 } else {
2858 bnx2x_fp(bp, index, sb_index_values) =
2859 (__le16 *)status_blk.e1x_sb->sb.index_values;
2860 bnx2x_fp(bp, index, sb_running_index) =
2861 (__le16 *)status_blk.e1x_sb->sb.running_index;
2862 }
2863}
2864
2865static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2866{
2867 union host_hc_status_block *sb;
2868 struct bnx2x_fastpath *fp = &bp->fp[index];
2869 int ring_size = 0;
2870
2871 /* if rx_ring_size specified - use it */
2872 int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2873 MAX_RX_AVAIL/bp->num_queues;
2874
2875 /* allocate at least number of buffers required by FW */
2876 rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2877 MIN_RX_SIZE_TPA,
2878 rx_ring_size);
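	/* e.g. without an explicitly configured bp->rx_ring_size each
	 * queue gets an equal share of MAX_RX_AVAIL, but never less than
	 * the FW minimum for its TPA mode
	 */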
2879
2880 bnx2x_fp(bp, index, bp) = bp;
2881 bnx2x_fp(bp, index, index) = index;
2882
2883 /* Common */
2884 sb = &bnx2x_fp(bp, index, status_blk);
2885#ifdef BCM_CNIC
2886 if (!IS_FCOE_IDX(index)) {
2887#endif
2888 /* status blocks */
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03002889 if (!CHIP_IS_E1x(bp))
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002890 BNX2X_PCI_ALLOC(sb->e2_sb,
2891 &bnx2x_fp(bp, index, status_blk_mapping),
2892 sizeof(struct host_hc_status_block_e2));
2893 else
2894 BNX2X_PCI_ALLOC(sb->e1x_sb,
2895 &bnx2x_fp(bp, index, status_blk_mapping),
2896 sizeof(struct host_hc_status_block_e1x));
2897#ifdef BCM_CNIC
2898 }
2899#endif
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00002900
2901 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
2902 * set shortcuts for it.
2903 */
2904 if (!IS_FCOE_IDX(index))
2905 set_sb_shortcuts(bp, index);
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002906
2907 /* Tx */
2908 if (!skip_tx_queue(bp, index)) {
2909 /* fastpath tx rings: tx_buf tx_desc */
2910 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2911 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2912 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2913 &bnx2x_fp(bp, index, tx_desc_mapping),
2914 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2915 }
2916
2917 /* Rx */
2918 if (!skip_rx_queue(bp, index)) {
2919 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2920 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2921 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2922 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2923 &bnx2x_fp(bp, index, rx_desc_mapping),
2924 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2925
2926 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2927 &bnx2x_fp(bp, index, rx_comp_mapping),
2928 sizeof(struct eth_fast_path_rx_cqe) *
2929 NUM_RCQ_BD);
2930
2931 /* SGE ring */
2932 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2933 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2934 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2935 &bnx2x_fp(bp, index, rx_sge_mapping),
2936 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2937 /* RX BD ring */
2938 bnx2x_set_next_page_rx_bd(fp);
2939
2940 /* CQ ring */
2941 bnx2x_set_next_page_rx_cq(fp);
2942
2943 /* BDs */
2944 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2945 if (ring_size < rx_ring_size)
2946 goto alloc_mem_err;
2947 }
2948
2949 return 0;
2950
2951/* handles low memory cases */
2952alloc_mem_err:
2953 BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2954 index, ring_size);
 2955 /* FW will drop all packets if the queue is not big enough;
 2956 * in that case we disable the queue.
 2957 * Min size is different for TPA and non-TPA queues
2958 */
2959 if (ring_size < (fp->disable_tpa ?
Dmitry Kravkoveb722d72011-05-24 02:06:06 +00002960 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002961 /* release memory allocated for this queue */
2962 bnx2x_free_fp_mem_at(bp, index);
2963 return -ENOMEM;
2964 }
2965 return 0;
2966}
2967
2968int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2969{
2970 int i;
2971
2972	/*
2973	 * 1. Allocate FP for leading - fatal if error
2974	 * 2. {CNIC} Allocate FCoE FP - fatal if error
2975	 * 3. Allocate RSS - reduce the number of queues on error
2976	 */
2977
2978 /* leading */
2979 if (bnx2x_alloc_fp_mem_at(bp, 0))
2980 return -ENOMEM;
2981#ifdef BCM_CNIC
Dmitry Kravkov8eef2af2011-06-14 01:32:47 +00002982 if (!NO_FCOE(bp))
2983 /* FCoE */
2984 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2985		/* we fail the load process instead of marking
2986		 * NO_FCOE_FLAG
2987		 */
2988 return -ENOMEM;
Dmitry Kravkovb3b83c32011-05-04 23:50:33 +00002989#endif
2990 /* RSS */
2991 for_each_nondefault_eth_queue(bp, i)
2992 if (bnx2x_alloc_fp_mem_at(bp, i))
2993 break;
2994
2995 /* handle memory failures */
2996 if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2997 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2998
2999 WARN_ON(delta < 0);
3000#ifdef BCM_CNIC
3001		/*
3002		 * Move the non-eth FPs next to the last eth FP.
3003		 * This must be done in the following order:
3004		 * FCOE_IDX < FWD_IDX < OOO_IDX
3005		 */
3006
3007 /* move FCoE fp */
3008 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
3009#endif
3010 bp->num_queues -= delta;
3011 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
3012 bp->num_queues + delta, bp->num_queues);
3013 }
3014
3015 return 0;
3016}
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00003017
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003018void bnx2x_free_mem_bp(struct bnx2x *bp)
3019{
3020 kfree(bp->fp);
3021 kfree(bp->msix_table);
3022 kfree(bp->ilt);
3023}
3024
3025int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
3026{
3027 struct bnx2x_fastpath *fp;
3028 struct msix_entry *tbl;
3029 struct bnx2x_ilt *ilt;
3030
3031 /* fp array */
3032 fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
3033 if (!fp)
3034 goto alloc_err;
3035 bp->fp = fp;
3036
3037 /* msix table */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00003038 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003039 GFP_KERNEL);
3040 if (!tbl)
3041 goto alloc_err;
3042 bp->msix_table = tbl;
3043
3044 /* ilt */
3045 ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
3046 if (!ilt)
3047 goto alloc_err;
3048 bp->ilt = ilt;
3049
3050 return 0;
3051alloc_err:
3052 bnx2x_free_mem_bp(bp);
3053 return -ENOMEM;
3054
3055}
3056
Dmitry Kravkova9fccec2011-06-14 01:33:30 +00003057int bnx2x_reload_if_running(struct net_device *dev)
Michał Mirosław66371c42011-04-12 09:38:23 +00003058{
3059 struct bnx2x *bp = netdev_priv(dev);
3060
3061 if (unlikely(!netif_running(dev)))
3062 return 0;
3063
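	/* Bring the device fully down and back up so that the new
	 * configuration takes effect.
	 */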
3064 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
3065 return bnx2x_nic_load(bp, LOAD_NORMAL);
3066}
3067
Yaniv Rosner1ac9e422011-05-31 21:26:11 +00003068int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
3069{
3070 u32 sel_phy_idx = 0;
3071 if (bp->link_params.num_phys <= 1)
3072 return INT_PHY;
3073
3074 if (bp->link_vars.link_up) {
3075 sel_phy_idx = EXT_PHY1;
3076 /* In case link is SERDES, check if the EXT_PHY2 is the one */
3077		/* In case the link is SERDES, check if EXT_PHY2 is the active one */
3078 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
3079 sel_phy_idx = EXT_PHY2;
3080 } else {
3081
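		/* Link is down - fall back to the PHY selected by the
		 * multi-PHY configuration.
		 */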
3082 switch (bnx2x_phy_selection(&bp->link_params)) {
3083 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
3084 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
3085 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
3086 sel_phy_idx = EXT_PHY1;
3087 break;
3088 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
3089 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
3090 sel_phy_idx = EXT_PHY2;
3091 break;
3092 }
3093 }
3094
3095 return sel_phy_idx;
3096
3097}
3098int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
3099{
3100 u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
3101	/*
3102	 * The selected active PHY is always the one after swapping (in case
3103	 * PHY swapping is enabled), so when swapping is enabled we need to
3104	 * reverse the configuration.
3105	 */
3106
3107 if (bp->link_params.multi_phy_config &
3108 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
3109 if (sel_phy_idx == EXT_PHY1)
3110 sel_phy_idx = EXT_PHY2;
3111 else if (sel_phy_idx == EXT_PHY2)
3112 sel_phy_idx = EXT_PHY1;
3113 }
3114 return LINK_CONFIG_IDX(sel_phy_idx);
3115}
3116
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003117/* called with rtnl_lock */
3118int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
3119{
3120 struct bnx2x *bp = netdev_priv(dev);
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003121
3122 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3123 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3124 return -EAGAIN;
3125 }
3126
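	/* Reject MTU values outside the range supported by the HW */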
3127 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
3128 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
3129 return -EINVAL;
3130
3131 /* This does not race with packet allocation
3132 * because the actual alloc size is
3133 * only updated as part of load
3134 */
3135 dev->mtu = new_mtu;
3136
Michał Mirosław66371c42011-04-12 09:38:23 +00003137 return bnx2x_reload_if_running(dev);
3138}
3139
3140u32 bnx2x_fix_features(struct net_device *dev, u32 features)
3141{
3142 struct bnx2x *bp = netdev_priv(dev);
3143
3144 /* TPA requires Rx CSUM offloading */
3145 if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
3146 features &= ~NETIF_F_LRO;
3147
3148 return features;
3149}
3150
3151int bnx2x_set_features(struct net_device *dev, u32 features)
3152{
3153 struct bnx2x *bp = netdev_priv(dev);
3154 u32 flags = bp->flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003155 bool bnx2x_reload = false;
Michał Mirosław66371c42011-04-12 09:38:23 +00003156
3157 if (features & NETIF_F_LRO)
3158 flags |= TPA_ENABLE_FLAG;
3159 else
3160 flags &= ~TPA_ENABLE_FLAG;
3161
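	/* NETIF_F_LOOPBACK toggles internal MAC (BMAC) loopback mode;
	 * changing it requires reloading the NIC.
	 */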
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003162 if (features & NETIF_F_LOOPBACK) {
3163 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
3164 bp->link_params.loopback_mode = LOOPBACK_BMAC;
3165 bnx2x_reload = true;
3166 }
3167 } else {
3168 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
3169 bp->link_params.loopback_mode = LOOPBACK_NONE;
3170 bnx2x_reload = true;
3171 }
3172 }
3173
Michał Mirosław66371c42011-04-12 09:38:23 +00003174 if (flags ^ bp->flags) {
3175 bp->flags = flags;
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003176 bnx2x_reload = true;
3177 }
Michał Mirosław66371c42011-04-12 09:38:23 +00003178
Mahesh Bandewar538dd2e2011-05-13 15:08:49 +00003179 if (bnx2x_reload) {
Michał Mirosław66371c42011-04-12 09:38:23 +00003180 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
3181 return bnx2x_reload_if_running(dev);
3182 /* else: bnx2x_nic_load() will be called at end of recovery */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003183 }
3184
Michał Mirosław66371c42011-04-12 09:38:23 +00003185 return 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003186}
3187
3188void bnx2x_tx_timeout(struct net_device *dev)
3189{
3190 struct bnx2x *bp = netdev_priv(dev);
3191
3192#ifdef BNX2X_STOP_ON_ERROR
3193 if (!bp->panic)
3194 bnx2x_panic();
3195#endif
3196	/* This allows the netif to be shut down gracefully before resetting */
3197 schedule_delayed_work(&bp->reset_task, 0);
3198}
3199
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003200int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
3201{
3202 struct net_device *dev = pci_get_drvdata(pdev);
3203 struct bnx2x *bp;
3204
3205 if (!dev) {
3206 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3207 return -ENODEV;
3208 }
3209 bp = netdev_priv(dev);
3210
3211 rtnl_lock();
3212
3213 pci_save_state(pdev);
3214
3215 if (!netif_running(dev)) {
3216 rtnl_unlock();
3217 return 0;
3218 }
3219
3220 netif_device_detach(dev);
3221
3222 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
3223
3224 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
3225
3226 rtnl_unlock();
3227
3228 return 0;
3229}
3230
3231int bnx2x_resume(struct pci_dev *pdev)
3232{
3233 struct net_device *dev = pci_get_drvdata(pdev);
3234 struct bnx2x *bp;
3235 int rc;
3236
3237 if (!dev) {
3238 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
3239 return -ENODEV;
3240 }
3241 bp = netdev_priv(dev);
3242
3243 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3244 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3245 return -EAGAIN;
3246 }
3247
3248 rtnl_lock();
3249
3250 pci_restore_state(pdev);
3251
3252 if (!netif_running(dev)) {
3253 rtnl_unlock();
3254 return 0;
3255 }
3256
3257 bnx2x_set_power_state(bp, PCI_D0);
3258 netif_device_attach(dev);
3259
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003260 /* Since the chip was reset, clear the FW sequence number */
3261 bp->fw_seq = 0;
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003262 rc = bnx2x_nic_load(bp, LOAD_OPEN);
3263
3264 rtnl_unlock();
3265
3266 return rc;
3267}
Vlad Zolotarov619c5cb2011-06-14 14:33:44 +03003268
3269
3270void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
3271 u32 cid)
3272{
3273 /* ustorm cxt validation */
3274 cxt->ustorm_ag_context.cdu_usage =
3275 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3276 CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
3277 /* xcontext validation */
3278 cxt->xstorm_ag_context.cdu_reserved =
3279 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
3280 CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
3281}
3282
3283static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
3284 u8 fw_sb_id, u8 sb_index,
3285 u8 ticks)
3286{
3287
3288 u32 addr = BAR_CSTRORM_INTMEM +
3289 CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
3290 REG_WR8(bp, addr, ticks);
3291 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
3292 port, fw_sb_id, sb_index, ticks);
3293}
3294
3295static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
3296 u16 fw_sb_id, u8 sb_index,
3297 u8 disable)
3298{
3299 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
3300 u32 addr = BAR_CSTRORM_INTMEM +
3301 CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
3302 u16 flags = REG_RD16(bp, addr);
3303 /* clear and set */
3304 flags &= ~HC_INDEX_DATA_HC_ENABLED;
3305 flags |= enable_flag;
3306 REG_WR16(bp, addr, flags);
3307 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
3308 port, fw_sb_id, sb_index, disable);
3309}
3310
3311void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
3312 u8 sb_index, u8 disable, u16 usec)
3313{
3314 int port = BP_PORT(bp);
3315 u8 ticks = usec / BNX2X_BTR;
3316
3317 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3318
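	/* Keep HC disabled if explicitly requested or if a zero timeout
	 * was given.
	 */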
3319 disable = disable ? 1 : (usec ? 0 : 1);
3320 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3321}