/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

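/* Free the DMA-coherent memory backing a queue's descriptor ring, if any */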
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

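/* Allocate and zero a DMA-coherent descriptor ring of len * entry_size bytes */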
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

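/*
 * Enable or disable chip interrupts by toggling the host-interrupt bit in
 * the PCICFG membar control register; does nothing if the bit already has
 * the requested value or an EEH error is pending.
 */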
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

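/*
 * Doorbell helpers: publish posted-buffer / pending-work counts to the RQ,
 * TXULP, EQ and CQ doorbell registers. The queue-notify variants issue a
 * wmb() so descriptor writes are visible to the device before the doorbell.
 */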
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

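/*
 * ndo_set_mac_address handler: on a PF, replace the pmac entry in hardware
 * (delete the old id, add the new address); VFs only update netdev->dev_addr
 * as their MAC is programmed by the parent PF.
 */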
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

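/*
 * Copy stats from the firmware's stats-cmd response into adapter->drv_stats.
 * The response layout differs per ASIC generation (BE2 v0, BE3 v1, Lancer
 * pport), hence the three populate_*_stats() variants below.
 */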
static void populate_be2_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	if (adapter->port_num)
		drvs->jabber_events = rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events = rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct lancer_cmd_pport_stats *pport_stats =
					pport_stats_from_cmd(adapter);

	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames =
		make_64bit_val(pport_stats->rx_pause_frames_hi,
			pport_stats->rx_pause_frames_lo);
	drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
			pport_stats->rx_crc_errors_lo);
	drvs->rx_control_frames =
		make_64bit_val(pport_stats->rx_control_frames_hi,
			pport_stats->rx_control_frames_lo);
	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
	drvs->rx_frame_too_long =
		make_64bit_val(pport_stats->rx_frames_too_long_hi,
			pport_stats->rx_frames_too_long_lo);
	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
	drvs->rx_dropped_tcp_length =
		pport_stats->rx_dropped_invalid_tcp_length;
	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
	drvs->rx_dropped_header_too_small =
		pport_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		make_64bit_val(pport_stats->rx_symbol_errors_hi,
			pport_stats->rx_symbol_errors_lo);
	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
	drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
			pport_stats->tx_pause_frames_lo);
	drvs->tx_controlframes =
		make_64bit_val(pport_stats->tx_control_frames_hi,
			pport_stats->tx_control_frames_lo);
	drvs->jabber_events = pport_stats->rx_jabbers;
	drvs->rx_drops_no_pbuf = 0;
	drvs->rx_drops_no_txpb = 0;
	drvs->rx_drops_no_erx_descr = 0;
	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
	drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
			pport_stats->num_forwards_lo);
	drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
			pport_stats->rx_drops_mtu_lo);
	drvs->rx_drops_no_tpre_descr = 0;
	drvs->rx_drops_too_many_frags =
		make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
			pport_stats->rx_drops_too_many_frags_lo);
}

void be_parse_stats(struct be_adapter *adapter)
{
	if (adapter->generation == BE_GEN3) {
		if (lancer_chip(adapter))
			populate_lancer_stats(adapter);
		else
			populate_be3_stats(adapter);
	} else {
		populate_be2_stats(adapter);
	}
}

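/* Fold per-queue driver counters and parsed HW stats into netdev->stats */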
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx_stats =
					be_erx_stats_from_cmd(adapter);
				dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay on BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

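/* Convert a byte count accumulated over 'ticks' jiffies to Mbits/sec */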
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
								bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

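/*
 * Fill the header WRB that leads the data WRBs of a transmit: checksum
 * offload, LSO and VLAN-tag fields are derived from the skb; num_wrb and
 * len describe the WRBs and payload bytes that follow.
 */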
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
						LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

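/* Undo the DMA mapping of a single tx WRB fragment (head buffer or page) */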
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

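/*
 * DMA-map the skb head and page frags, filling one WRB per mapping, plus
 * the header WRB and an optional dummy WRB to keep the count even.
 * Returns the bytes queued, or 0 after unwinding all mappings on error.
 */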
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

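/* ndo_start_xmit handler: build WRBs for the skb and ring the tx doorbell */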
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

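/*
 * Program the rx filter: unicast promiscuous when IFF_PROMISC is set,
 * multicast promiscuous when IFF_ALLMULTI is set or the list exceeds
 * BE_MAX_MC, otherwise the exact multicast list.
 */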
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, true);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, false);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI ||
	    netdev_mc_count(netdev) > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
			&adapter->mc_cmd_mem);
done:
	return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
			struct ifla_vf_info *vi)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (vf >= num_vfs)
		return -EINVAL;

	vi->vf = vf;
	vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
	vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
	vi->qos = 0;
	memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

	return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
			int vf, int rate)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (rate < 0))
		return -EINVAL;

	if (rate > 10000)
		rate = 10000;

	adapter->vf_cfg[vf].vf_tx_rate = rate;
	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

	if (status)
		dev_info(&adapter->pdev->dev,
				"tx rate %d on VF %d failed\n", rate, vf);
	return status;
}

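/* Recompute the rx bit-rate; rate-limited to once every two seconds */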
static void be_rx_rate_update(struct be_rx_obj *rxo)
{
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->rx_jiffies)) {
		stats->rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->rx_jiffies) < 2 * HZ)
		return;

	stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
				now - stats->rx_jiffies);
	stats->rx_jiffies = now;
	stats->rx_bytes_prev = stats->rx_bytes;
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_stats *stats = &rxo->stats;

	stats->rx_compl++;
	stats->rx_frags += rxcp->num_rcvd;
	stats->rx_bytes += rxcp->pkt_size;
	stats->rx_pkts++;
	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
		stats->rx_mcast_pkts++;
	if (rxcp->err)
		stats->rxcp_err++;
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
				(rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, num_rcvd = rxcp->num_rcvd;

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxcp->rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_rx_compl_info *rxcp)
{
	struct net_device *netdev = adapter->netdev;
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp);

	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, netdev);
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (unlikely(rxcp->vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					rxcp->vlan_tag);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
						page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
				rxcp->vlan_tag);
}

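/* The BE3-native ("v1") and legacy ("v0") Rx completion descriptors carry
 * the same fields at different bit positions; the two parsers below extract
 * them into the hardware-independent be_rx_compl_info.
 */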
static void be_parse_rx_compl_v1(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
					       compl);
	}
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
				struct be_eth_rx_compl *compl,
				struct be_rx_compl_info *rxcp)
{
	rxcp->pkt_size =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
	rxcp->ip_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
	rxcp->l4_csum =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
	rxcp->ipv6 =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
	rxcp->rxq_idx =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
	rxcp->num_rcvd =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
	rxcp->pkt_type =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
	rxcp->rss_hash =
		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
	if (rxcp->vlanf) {
		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
					  compl);
		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
					       compl);
	}
}

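/* Fetch the next valid Rx completion from the CQ, byte-swap it and parse it
 * into rxo->rxcp. Returns NULL when the CQ has no new entries.
 */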
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is OK to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we won't touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

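/* Fetch the next valid Tx completion from the CQ; NULL when none is pending */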
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

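/* Unmap and free the skb whose wrbs end at last_index; returns the number of
 * wrbs (including the header wrb) that were consumed.
 */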
static u16 be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	kfree_skb(sent_skb);
	return num_wrbs;
}

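/* Pop the next posted entry from an event queue; NULL when the EQ is empty */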
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

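/* Drain pending Rx completions and free all posted Rx buffers; the rxq must
 * be empty (used == 0) by the time this returns.
 */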
static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

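/* Wait up to ~200ms for in-flight Tx completions to arrive, then force-free
 * any posted Tx wrbs whose completions will never arrive.
 */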
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			num_wrbs += be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			atomic_sub(num_wrbs, &txq->used);
			cmpl = 0;
			num_wrbs = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		num_wrbs = be_tx_compl_process(adapter, end_idx);
		atomic_sub(num_wrbs, &txq->used);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

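/* Create the Tx event queue, Tx completion queue and Tx queue, tearing down
 * whatever has been created when an intermediate step fails.
 */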
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

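/* Multiple Rx queues (RSS) are used only when the function advertises RSS
 * capability, SR-IOV is off and the 0x400 function-mode bit is clear (the
 * meaning of that bit is not spelled out in this file).
 */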
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
				msix_enabled(adapter) ?
					adapter->num_msix_vec - 1 : 1);
	if (adapter->num_rx_qs != MAX_RX_QS)
		dev_warn(&adapter->pdev->dev,
			"Can create only %d RX queues", adapter->num_rx_qs);

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;

		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

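/* INTx handler. Lancer has no CEV_ISR register, so pending work is detected
 * by peeking at the EQs; on BE2/BE3 the ISR identifies which EQs fired.
 */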
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.eq_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.eq_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_compl_info *rxcp)
{
	return (rxcp->tcpf && !rxcp->err) ? true : false;
}

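/* NAPI poll handler for an Rx queue: reap up to 'budget' completions,
 * replenish the queue when it runs low and re-arm the CQ when done.
 */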
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_rx_compl_info *rxcp;
	u32 work_done;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		/* Ignore flush completions */
		if (rxcp->num_rcvd && rxcp->pkt_size) {
			if (do_gro(rxcp))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		} else if (rxcp->pkt_size == 0) {
			be_rx_compl_discard(adapter, rxo, rxcp);
		}

		be_rx_stats_update(rxo, rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx, num_wrbs = 0;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		num_wrbs += be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		atomic_sub(num_wrbs, &txq->used);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

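/* Read the unrecoverable-error (UE) status registers from PCI config space
 * and log each functional block that reported an unmasked error.
 */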
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

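/* Periodic (1s) worker: checks for UEs, reaps MCC completions while the
 * interface is down, issues stats requests, updates Tx/Rx rates and EQ
 * delays, and replenishes Rx queues that ran out of buffers.
 */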
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}
	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (msix_enabled(adapter)) {
		pci_disable_msix(adapter->pdev);
		adapter->num_msix_vec = 0;
	}
}

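/* Ask for one MSI-X vector per desired Rx queue plus one for Tx/MCC; if the
 * PCI core offers fewer, retry with that count as long as it meets the
 * two-vector minimum.
 */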
static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status, num_vec;

	num_vec = be_num_rxqs_want(adapter) + 1;

	for (i = 0; i < num_vec; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		num_vec = status;
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				num_vec) == 0)
			goto done;
	}
	return;
done:
	adapter->num_msix_vec = num_vec;
	return;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
				"Device supports %d VFs and not %d\n",
				nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
					struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->eq_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);
	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}

static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (msix_enabled(adapter)) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!msix_enabled(adapter)) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, false, 0);
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
	}

	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for the VFs are assigned incrementally starting from the
 * seed. These addresses are programmed in the ASIC by the PF and the VF
 * driver queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

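/* Create the interface(s) (PF and, when SR-IOV is on, one per VF) and then
 * the Tx, Rx and MCC queues; on failure everything created so far is torn
 * down.
 */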
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

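/* Undo be_setup(): release the VF MAC addresses, the MCC/RX/TX queues and
 * the interfaces, in the reverse order of their creation.
 */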
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
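/* Flash the redboot (boot-code) image only when the CRC of the image in the
 * UFI file differs from the CRC already programmed in flash.
 */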
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if the CRC does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

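/* Walk the per-generation component table and write each firmware image to
 * its flash region in 32KB chunks; intermediate chunks use the SAVE
 * operation, and the final chunk uses FLASH to commit the image.
 */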
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

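/* Derive the controller generation of the UFI file from the build string
 * in its generic (gen2) header.
 */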
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

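/* Entry point for firmware flashing, typically reached from the ethtool
 * flash path (e.g. "ethtool -f ethX <image>.ufi"). Loads the UFI file via
 * request_firmware(), checks that its generation matches the adapter, and
 * flashes each embedded image.
 */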
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

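/* Advertise the offloads the hardware supports (checksum, TSO, VLAN
 * acceleration, RX hashing when multiple RX queues are in use), set flow
 * control defaults, and register a NAPI context per RX queue plus one for
 * the TX/MCC event queue.
 */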
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

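/* Map the BARs needed by this function. Lancer exposes only a doorbell BAR;
 * on BE2/BE3 the CSR, doorbell and pcicfg BARs map differently per
 * generation, and a VF reaches its "pcicfg" space through an offset within
 * the doorbell BAR.
 */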
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

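/* Map the BARs and set up the DMA-coherent memory for the command paths:
 * the mailbox (16-byte aligned inside an over-allocated buffer) and the
 * multicast-config command buffer, plus the locks that serialize
 * mailbox/MCC access.
 */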
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (cmd->va)
		dma_free_coherent(&adapter->pdev->dev, cmd->size,
				  cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	if (adapter->generation == BE_GEN2) {
		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
	} else {
		if (lancer_chip(adapter))
			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
		else
			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
	}
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

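/* Query identity and capabilities from firmware: FW version, port number,
 * function mode/caps, the permanent MAC address (PF only) and controller
 * attributes; the size of the VLAN table is derived from the function mode.
 */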
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);
	return 0;
}

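/* Determine the adapter generation from the PCI device ID. For
 * OC_DEVICE_ID3 this also validates the SLI_INTF register, records the SLI
 * family, and rejects num_vfs, since VFs are not supported on that device.
 */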
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

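/* Poll the SLIPORT status register until the Lancer firmware reports
 * ready, giving up after SLIPORT_READY_TIMEOUT iterations of 20ms (~10s).
 */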
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 500
	u32 sliport_status;
	int status = 0, i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			break;

		msleep(20);
	}

	if (i == SLIPORT_READY_TIMEOUT)
		status = -1;

	return status;
}

static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check if the adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	return status;
}

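/* PCI probe: enable the device, map BARs and control structures, sync with
 * firmware (POST, fw_init, function reset), create the queues via
 * be_setup(), register the netdev, and on an SR-IOV enabled PF program the
 * VF MAC addresses and initial TX rates before kicking off the worker.
 */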
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);
	if (adapter->sriov_enabled) {
		adapter->vf_cfg = kcalloc(num_vfs,
			sizeof(struct be_vf_cfg), GFP_KERNEL);

		if (!adapter->vf_cfg)
			goto free_netdev;
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_vf_cfg;

	if (lancer_chip(adapter)) {
		status = lancer_test_and_set_rdy_state(adapter);
		if (status) {
			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
			goto ctrl_clean;
		}
	}

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		u8 mac_speed;
		bool link_up;
		u16 vf, lnk_speed;

		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;

		for (vf = 0; vf < num_vfs; vf++) {
			status = be_cmd_link_status_query(adapter, &link_up,
					&mac_speed, &lnk_speed, vf + 1);
			if (!status)
				adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
			else
				goto unreg_netdev;
		}
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_vf_cfg:
	kfree(adapter->vf_cfg);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}

static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);