blob: c4f564cd745b29ad092ca4e2a11c46614f888ab7 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparded2145cd2011-03-16 08:20:46 +00002 * Copyright (C) 2005 - 2011 Emulex
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
Ajit Khaparded2145cd2011-03-16 08:20:46 +000011 * linux-drivers@emulex.com
Sathya Perla6b7c5b92009-03-11 23:32:03 -070012 *
Ajit Khaparded2145cd2011-03-16 08:20:46 +000013 * Emulex
14 * 3333 Susan Street
15 * Costa Mesa, CA 92626
Sathya Perla6b7c5b92009-03-11 23:32:03 -070016 */
17
Paul Gortmaker70c71602011-05-22 16:47:17 -040018#include <linux/prefetch.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070019#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000020#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070021#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070022
23MODULE_VERSION(DRV_VER);
24MODULE_DEVICE_TABLE(pci, be_dev_ids);
25MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
26MODULE_AUTHOR("ServerEngines Corporation");
27MODULE_LICENSE("GPL");
28
Sathya Perla2e588f82011-03-11 02:49:26 +000029static ushort rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000030static unsigned int num_vfs;
Sathya Perla2e588f82011-03-11 02:49:26 +000031module_param(rx_frag_size, ushort, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000032module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070033MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000034MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070035
Sathya Perla3abcded2010-10-03 22:12:27 -070036static bool multi_rxq = true;
37module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
38MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");
39
/* PCI IDs claimed by this driver: BE2/BE3 parts under the ServerEngines
 * vendor ID and Lancer/OneConnect parts under the Emulex vendor ID.
 * The zero entry terminates the table.
 */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for the HW blocks flagged by each bit of the UE
 * (Unrecoverable Error) status low CSR; index == bit position.
 * Const-qualified: this is read-only diagnostic data.
 */
static const char * const ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable names for the HW blocks flagged by each bit of the UE
 * status high CSR; index == bit position. BUGFIX: "NETC" previously
 * lacked a trailing comma, so it silently concatenated with the first
 * "Unknown" entry ("NETCUnknown"), leaving the array one entry short and
 * shifting every subsequent bit description.
 */
static const char * const ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",		/* comma was missing here */
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700120
121static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
122{
123 struct be_dma_mem *mem = &q->dma_mem;
124 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000125 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
126 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700127}
128
129static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
130 u16 len, u16 entry_size)
131{
132 struct be_dma_mem *mem = &q->dma_mem;
133
134 memset(q, 0, sizeof(*q));
135 q->len = len;
136 q->entry_size = entry_size;
137 mem->size = len * entry_size;
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000138 mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
139 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700140 if (!mem->va)
141 return -1;
142 memset(mem->va, 0, mem->size);
143 return 0;
144}
145
/* Enable or disable host interrupt delivery via the HOSTINTR bit of the
 * membar interrupt control register. The register is read-modify-written
 * only when the requested state differs from the current one.
 */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	/* NOTE(review): the EEH check happens after the ioread32 above, so a
	 * register read is still issued during EEH; presumably harmless —
	 * confirm before reordering.
	 */
	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state: skip write */

	iowrite32(reg, addr);
}
164
Sathya Perla8788fdc2009-07-27 22:52:03 +0000165static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700166{
167 u32 val = 0;
168 val |= qid & DB_RQ_RING_ID_MASK;
169 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000170
171 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000172 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700173}
174
Sathya Perla8788fdc2009-07-27 22:52:03 +0000175static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700176{
177 u32 val = 0;
178 val |= qid & DB_TXULP_RING_ID_MASK;
179 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000180
181 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000182 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700183}
184
Sathya Perla8788fdc2009-07-27 22:52:03 +0000185static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186 bool arm, bool clear_int, u16 num_popped)
187{
188 u32 val = 0;
189 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000190 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
191 DB_EQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000192
193 if (adapter->eeh_err)
194 return;
195
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196 if (arm)
197 val |= 1 << DB_EQ_REARM_SHIFT;
198 if (clear_int)
199 val |= 1 << DB_EQ_CLR_SHIFT;
200 val |= 1 << DB_EQ_EVNT_SHIFT;
201 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000202 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203}
204
Sathya Perla8788fdc2009-07-27 22:52:03 +0000205void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700206{
207 u32 val = 0;
208 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000209 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
210 DB_CQ_RING_ID_EXT_MASK_SHIFT);
Sathya Perlacf588472010-02-14 21:22:01 +0000211
212 if (adapter->eeh_err)
213 return;
214
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700215 if (arm)
216 val |= 1 << DB_CQ_REARM_SHIFT;
217 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000218 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700219}
220
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700221static int be_mac_addr_set(struct net_device *netdev, void *p)
222{
223 struct be_adapter *adapter = netdev_priv(netdev);
224 struct sockaddr *addr = p;
225 int status = 0;
226
Ajit Khapardeca9e4982009-11-29 17:56:26 +0000227 if (!is_valid_ether_addr(addr->sa_data))
228 return -EADDRNOTAVAIL;
229
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000230 /* MAC addr configuration will be done in hardware for VFs
231 * by their corresponding PFs. Just copy to netdev addr here
232 */
233 if (!be_physfn(adapter))
234 goto netdev_addr;
235
Ajit Khapardef8617e02011-02-11 13:36:37 +0000236 status = be_cmd_pmac_del(adapter, adapter->if_handle,
237 adapter->pmac_id, 0);
Sathya Perlaa65027e2009-08-17 00:58:04 +0000238 if (status)
239 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700240
Sathya Perlaa65027e2009-08-17 00:58:04 +0000241 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000242 adapter->if_handle, &adapter->pmac_id, 0);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000243netdev_addr:
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700244 if (!status)
245 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
246
247 return status;
248}
249
/* Translate the v0-format (BE2) FW statistics buffers into the driver's
 * version-agnostic be_drv_stats, so the rest of the driver never has to
 * know which stats layout the chip returns.
 */
static void populate_be2_stats(struct be_adapter *adapter)
{

	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v0 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);
	struct be_rxf_stats_v0 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);

	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;

	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;

	/* jabber counters are kept per physical port in the v0 layout */
	if (adapter->port_num)
		drvs->jabber_events =
			rxf_stats->port1_jabber_events;
	else
		drvs->jabber_events =
			rxf_stats->port0_jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
304
/* Translate the v1-format (BE3) FW statistics buffers into the driver's
 * version-agnostic be_drv_stats. Fields that exist only in the v0 layout
 * are explicitly zeroed so stale values never leak through.
 */
static void populate_be3_stats(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct be_pmem_stats *pmem_sts = be_pmem_stats_from_cmd(adapter);

	struct be_rxf_stats_v1 *rxf_stats =
		be_rxf_stats_from_cmd(adapter);
	struct be_port_rxf_stats_v1 *port_stats =
		be_port_rxf_stats_from_cmd(adapter);

	/* not reported by the v1 layout */
	drvs->rx_priority_pause_frames = 0;
	drvs->pmem_fifo_overflow_drop = 0;
	drvs->rx_pause_frames = port_stats->rx_pause_frames;
	drvs->rx_crc_errors = port_stats->rx_crc_errors;
	drvs->rx_control_frames = port_stats->rx_control_frames;
	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
	drvs->rx_dropped_header_too_small =
		port_stats->rx_dropped_header_too_small;
	drvs->rx_input_fifo_overflow_drop =
		port_stats->rx_input_fifo_overflow_drop;
	drvs->rx_address_match_errors =
		port_stats->rx_address_match_errors;
	drvs->rx_alignment_symbol_errors =
		port_stats->rx_alignment_symbol_errors;
	drvs->rxpp_fifo_overflow_drop =
		port_stats->rxpp_fifo_overflow_drop;
	drvs->tx_pauseframes = port_stats->tx_pauseframes;
	drvs->tx_controlframes = port_stats->tx_controlframes;
	/* v1 keeps jabber events per port-stats block, not per chip */
	drvs->jabber_events = port_stats->jabber_events;
	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
	drvs->forwarded_packets = rxf_stats->forwarded_packets;
	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
	drvs->rx_drops_no_tpre_descr =
		rxf_stats->rx_drops_no_tpre_descr;
	drvs->rx_drops_too_many_frags =
		rxf_stats->rx_drops_too_many_frags;
	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}
355
Selvin Xavier005d5692011-05-16 07:36:35 +0000356static void populate_lancer_stats(struct be_adapter *adapter)
357{
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000358
Selvin Xavier005d5692011-05-16 07:36:35 +0000359 struct be_drv_stats *drvs = &adapter->drv_stats;
360 struct lancer_cmd_pport_stats *pport_stats = pport_stats_from_cmd
361 (adapter);
362 drvs->rx_priority_pause_frames = 0;
363 drvs->pmem_fifo_overflow_drop = 0;
364 drvs->rx_pause_frames =
Selvin Xavieraedfebb2011-06-06 02:27:13 +0000365 make_64bit_val(pport_stats->rx_pause_frames_hi,
366 pport_stats->rx_pause_frames_lo);
Selvin Xavier005d5692011-05-16 07:36:35 +0000367 drvs->rx_crc_errors = make_64bit_val(pport_stats->rx_crc_errors_hi,
368 pport_stats->rx_crc_errors_lo);
369 drvs->rx_control_frames =
370 make_64bit_val(pport_stats->rx_control_frames_hi,
371 pport_stats->rx_control_frames_lo);
372 drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
373 drvs->rx_frame_too_long =
374 make_64bit_val(pport_stats->rx_internal_mac_errors_hi,
375 pport_stats->rx_frames_too_long_lo);
376 drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
377 drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
378 drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
379 drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
380 drvs->rx_dropped_tcp_length =
381 pport_stats->rx_dropped_invalid_tcp_length;
382 drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
383 drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
384 drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
385 drvs->rx_dropped_header_too_small =
386 pport_stats->rx_dropped_header_too_small;
387 drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
388 drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
389 drvs->rx_alignment_symbol_errors =
390 make_64bit_val(pport_stats->rx_symbol_errors_hi,
391 pport_stats->rx_symbol_errors_lo);
392 drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
393 drvs->tx_pauseframes = make_64bit_val(pport_stats->tx_pause_frames_hi,
394 pport_stats->tx_pause_frames_lo);
395 drvs->tx_controlframes =
396 make_64bit_val(pport_stats->tx_control_frames_hi,
397 pport_stats->tx_control_frames_lo);
398 drvs->jabber_events = pport_stats->rx_jabbers;
399 drvs->rx_drops_no_pbuf = 0;
400 drvs->rx_drops_no_txpb = 0;
401 drvs->rx_drops_no_erx_descr = 0;
402 drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
403 drvs->forwarded_packets = make_64bit_val(pport_stats->num_forwards_hi,
404 pport_stats->num_forwards_lo);
405 drvs->rx_drops_mtu = make_64bit_val(pport_stats->rx_drops_mtu_hi,
406 pport_stats->rx_drops_mtu_lo);
407 drvs->rx_drops_no_tpre_descr = 0;
408 drvs->rx_drops_too_many_frags =
409 make_64bit_val(pport_stats->rx_drops_too_many_frags_hi,
410 pport_stats->rx_drops_too_many_frags_lo);
411}
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000412
413void be_parse_stats(struct be_adapter *adapter)
414{
Selvin Xavier005d5692011-05-16 07:36:35 +0000415 if (adapter->generation == BE_GEN3) {
416 if (lancer_chip(adapter))
417 populate_lancer_stats(adapter);
418 else
419 populate_be3_stats(adapter);
420 } else {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000421 populate_be2_stats(adapter);
Selvin Xavier005d5692011-05-16 07:36:35 +0000422 }
Ajit Khaparde89a88ab2011-05-16 07:36:18 +0000423}
424
/* Rebuild adapter->netdev->stats from scratch: sum the per-RX/TX-queue
 * software counters, fold in the per-queue ERX drop counters from the FW
 * stats buffer (layout chosen by chip generation), then derive the
 * aggregate rx_errors / rx_fifo_errors fields from drv_stats.
 */
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_drv_stats *drvs = &adapter->drv_stats;
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	int i;

	/* counters are recomputed in full on every call */
	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		if (adapter->generation == BE_GEN3) {
			/* Lancer has no per-queue no-fragment drop counter */
			if (!(lancer_chip(adapter))) {
				struct be_erx_stats_v1 *erx_stats =
					be_erx_stats_from_cmd(adapter);
				dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
			}
		} else {
			struct be_erx_stats_v0 *erx_stats =
				be_erx_stats_from_cmd(adapter);
			dev_stats->rx_dropped +=
				erx_stats->rx_drops_no_fragments[rxo->q.id];
		}
	}

	for_all_tx_queues(adapter, txo, i) {
		dev_stats->tx_packets += tx_stats(txo)->be_tx_pkts;
		dev_stats->tx_bytes += tx_stats(txo)->be_tx_bytes;
	}

	/* bad pkts received */
	dev_stats->rx_errors = drvs->rx_crc_errors +
		drvs->rx_alignment_symbol_errors +
		drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long +
		drvs->rx_dropped_too_small +
		drvs->rx_dropped_too_short +
		drvs->rx_dropped_header_too_small +
		drvs->rx_dropped_tcp_length +
		drvs->rx_dropped_runt +
		drvs->rx_tcp_checksum_errs +
		drvs->rx_ip_checksum_errs +
		drvs->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = drvs->rx_in_range_errors +
		drvs->rx_out_range_errors +
		drvs->rx_frame_too_long;

	dev_stats->rx_crc_errors = drvs->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is no per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
		drvs->rx_input_fifo_overflow_drop +
		drvs->rx_drops_no_pbuf;
}
490
Sathya Perla8788fdc2009-07-27 22:52:03 +0000491void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700492{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700493 struct net_device *netdev = adapter->netdev;
494
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700495 /* If link came up or went down */
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000496 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000497 adapter->link_speed = -1;
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000498 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700499 netif_carrier_on(netdev);
500 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000501 } else {
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000502 netif_carrier_off(netdev);
503 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700504 }
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000505 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700506 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700507}
508
/* Update the EQ delay n BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	/* adaptive interrupt coalescing disabled: nothing to tune */
	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	/* frags received per second over the elapsed window */
	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	/* map frags/sec to an EQ delay; the 110000 divisor and <<3 scaling
	 * look empirically tuned — no derivation visible here
	 */
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	/* very low rates: disable delay entirely rather than use a tiny one */
	if (eqd < 10)
		eqd = 0;
	/* only issue the FW command when the delay actually changed */
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
548
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700549static u32 be_calc_rate(u64 bytes, unsigned long ticks)
550{
551 u64 rate = bytes;
552
553 do_div(rate, ticks / HZ);
554 rate <<= 3; /* bytes/sec -> bits/sec */
555 do_div(rate, 1000000ul); /* MB/Sec */
556
557 return rate;
558}
559
Sathya Perla3c8def92011-06-12 20:01:58 +0000560static void be_tx_rate_update(struct be_tx_obj *txo)
Sathya Perla4097f662009-03-24 16:40:13 -0700561{
Sathya Perla3c8def92011-06-12 20:01:58 +0000562 struct be_tx_stats *stats = tx_stats(txo);
Sathya Perla4097f662009-03-24 16:40:13 -0700563 ulong now = jiffies;
564
565 /* Wrapped around? */
566 if (time_before(now, stats->be_tx_jiffies)) {
567 stats->be_tx_jiffies = now;
568 return;
569 }
570
571 /* Update tx rate once in two seconds */
572 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700573 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
574 - stats->be_tx_bytes_prev,
575 now - stats->be_tx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700576 stats->be_tx_jiffies = now;
577 stats->be_tx_bytes_prev = stats->be_tx_bytes;
578 }
579}
580
Sathya Perla3c8def92011-06-12 20:01:58 +0000581static void be_tx_stats_update(struct be_tx_obj *txo,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000582 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700583{
Sathya Perla3c8def92011-06-12 20:01:58 +0000584 struct be_tx_stats *stats = tx_stats(txo);
585
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700586 stats->be_tx_reqs++;
587 stats->be_tx_wrbs += wrb_cnt;
588 stats->be_tx_bytes += copied;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000589 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700590 if (stopped)
591 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700592}
593
594/* Determine number of WRB entries needed to xmit data in an skb */
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000595static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
596 bool *dummy)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700597{
David S. Millerebc8d2a2009-06-09 01:01:31 -0700598 int cnt = (skb->len > skb->data_len);
599
600 cnt += skb_shinfo(skb)->nr_frags;
601
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700602 /* to account for hdr wrb */
603 cnt++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000604 if (lancer_chip(adapter) || !(cnt & 1)) {
605 *dummy = false;
606 } else {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700607 /* add a dummy to make it an even num */
608 cnt++;
609 *dummy = true;
Sathya Perlafe6d2a32010-11-21 23:25:50 +0000610 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700611 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
612 return cnt;
613}
614
615static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
616{
617 wrb->frag_pa_hi = upper_32_bits(addr);
618 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
619 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
620}
621
/* Build the header WRB that precedes the data WRBs of a TX request:
 * flags for CRC, LSO/checksum offload, VLAN insertion, plus the WRB count
 * and total byte length of the request.
 */
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		/* Lancer A0 SLI additionally needs explicit IP/L4 checksum
		 * flags even on LSO requests
		 */
		if (lancer_chip(adapter) && adapter->sli_family ==
			LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
671
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000672static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
Sathya Perla7101e112010-03-22 20:41:12 +0000673 bool unmap_single)
674{
675 dma_addr_t dma;
676
677 be_dws_le_to_cpu(wrb, sizeof(*wrb));
678
679 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000680 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000681 if (unmap_single)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000682 dma_unmap_single(dev, dma, wrb->frag_len,
683 DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000684 else
Ivan Vecera2b7bceb2011-02-02 08:05:12 +0000685 dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
Sathya Perla7101e112010-03-22 20:41:12 +0000686 }
687}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700688
/* Map an skb for DMA and post its WRBs onto the TX ring: a header WRB,
 * one WRB for the linear data (if any), one per page fragment, and an
 * optional dummy WRB to keep the count even.
 *
 * Returns the number of data bytes queued, or 0 if a DMA mapping failed
 * (in which case all mappings made so far are undone and the ring head
 * is rolled back — the skb is NOT consumed).
 */
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB now; it is filled in last, once the total
	 * byte count is known
	 */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		/* zero-length pad WRB to make the WRB count even */
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: walk the WRBs posted so far and unmap each; only the
	 * first one (if linear data was mapped) used dma_map_single
	 */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
754
/* ndo_start_xmit handler: build TX WRBs for 'skb', ring the TX doorbell and
 * update stats.  Always returns NETDEV_TX_OK; if DMA mapping fails the skb
 * is dropped (freed) rather than requeued.
 */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	/* one TX object (queue + stats) per subqueue */
	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
	struct be_queue_info *txq = &txo->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;	/* saved so we can roll back on failure */
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(txo->sent_skb_list[start]);
		txo->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(txo, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed in make_tx_wrbs (which already undid
		 * its own mappings); restore the header-WRB slot and drop */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
794
795static int be_change_mtu(struct net_device *netdev, int new_mtu)
796{
797 struct be_adapter *adapter = netdev_priv(netdev);
798 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000799 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
800 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700801 dev_info(&adapter->pdev->dev,
802 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000803 BE_MIN_MTU,
804 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700805 return -EINVAL;
806 }
807 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
808 netdev->mtu, new_mtu);
809 netdev->mtu = new_mtu;
810 return 0;
811}
812
813/*
Ajit Khaparde82903e42010-02-09 01:34:57 +0000814 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
815 * If the user configures more, place BE in vlan promiscuous mode.
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700816 */
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000817static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700818{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700819 u16 vtag[BE_NUM_VLANS_SUPPORTED];
820 u16 ntags = 0, i;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000821 int status = 0;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000822 u32 if_handle;
823
824 if (vf) {
825 if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
826 vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
827 status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
828 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829
Ajit Khaparde82903e42010-02-09 01:34:57 +0000830 if (adapter->vlans_added <= adapter->max_vlans) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700831 /* Construct VLAN Table to give to HW */
Jesse Grossb7381272010-10-20 13:56:02 +0000832 for (i = 0; i < VLAN_N_VID; i++) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833 if (adapter->vlan_tag[i]) {
834 vtag[ntags] = cpu_to_le16(i);
835 ntags++;
836 }
837 }
Sathya Perlab31c50a2009-09-17 10:30:13 -0700838 status = be_cmd_vlan_config(adapter, adapter->if_handle,
839 vtag, ntags, 1, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700840 } else {
Sathya Perlab31c50a2009-09-17 10:30:13 -0700841 status = be_cmd_vlan_config(adapter, adapter->if_handle,
842 NULL, 0, 1, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700843 }
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000844
Sathya Perlab31c50a2009-09-17 10:30:13 -0700845 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700846}
847
848static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
849{
850 struct be_adapter *adapter = netdev_priv(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700851
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700852 adapter->vlan_grp = grp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700853}
854
855static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
856{
857 struct be_adapter *adapter = netdev_priv(netdev);
858
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000859 adapter->vlans_added++;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000860 if (!be_physfn(adapter))
861 return;
862
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700863 adapter->vlan_tag[vid] = 1;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000864 if (adapter->vlans_added <= (adapter->max_vlans + 1))
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000865 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700866}
867
868static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
869{
870 struct be_adapter *adapter = netdev_priv(netdev);
871
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000872 adapter->vlans_added--;
873 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
874
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000875 if (!be_physfn(adapter))
876 return;
877
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700878 adapter->vlan_tag[vid] = 0;
Ajit Khaparde82903e42010-02-09 01:34:57 +0000879 if (adapter->vlans_added <= adapter->max_vlans)
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000880 be_vid_config(adapter, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700881}
882
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700883static void be_set_multicast_list(struct net_device *netdev)
884{
885 struct be_adapter *adapter = netdev_priv(netdev);
886
887 if (netdev->flags & IFF_PROMISC) {
Padmanabh Ratnakarecd0bf02011-05-10 05:13:26 +0000888 be_cmd_promiscuous_config(adapter, true);
Sathya Perla24307ee2009-06-18 00:09:25 +0000889 adapter->promiscuous = true;
890 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700891 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000892
Lucas De Marchi25985ed2011-03-30 22:57:33 -0300893 /* BE was previously in promiscuous mode; disable it */
Sathya Perla24307ee2009-06-18 00:09:25 +0000894 if (adapter->promiscuous) {
895 adapter->promiscuous = false;
Padmanabh Ratnakarecd0bf02011-05-10 05:13:26 +0000896 be_cmd_promiscuous_config(adapter, false);
Sathya Perla24307ee2009-06-18 00:09:25 +0000897 }
898
Sathya Perlae7b909a2009-11-22 22:01:10 +0000899 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000900 if (netdev->flags & IFF_ALLMULTI ||
901 netdev_mc_count(netdev) > BE_MAX_MC) {
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000902 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
Sathya Perlae7b909a2009-11-22 22:01:10 +0000903 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000904 goto done;
905 }
906
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000907 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800908 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000909done:
910 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700911}
912
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000913static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
914{
915 struct be_adapter *adapter = netdev_priv(netdev);
916 int status;
917
918 if (!adapter->sriov_enabled)
919 return -EPERM;
920
921 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
922 return -EINVAL;
923
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000924 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
925 status = be_cmd_pmac_del(adapter,
926 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000927 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000928
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000929 status = be_cmd_pmac_add(adapter, mac,
930 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000931 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000932
933 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000934 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
935 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000936 else
937 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
938
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000939 return status;
940}
941
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000942static int be_get_vf_config(struct net_device *netdev, int vf,
943 struct ifla_vf_info *vi)
944{
945 struct be_adapter *adapter = netdev_priv(netdev);
946
947 if (!adapter->sriov_enabled)
948 return -EPERM;
949
950 if (vf >= num_vfs)
951 return -EINVAL;
952
953 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000954 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000955 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000956 vi->qos = 0;
957 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
958
959 return 0;
960}
961
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000962static int be_set_vf_vlan(struct net_device *netdev,
963 int vf, u16 vlan, u8 qos)
964{
965 struct be_adapter *adapter = netdev_priv(netdev);
966 int status = 0;
967
968 if (!adapter->sriov_enabled)
969 return -EPERM;
970
971 if ((vf >= num_vfs) || (vlan > 4095))
972 return -EINVAL;
973
974 if (vlan) {
975 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
976 adapter->vlans_added++;
977 } else {
978 adapter->vf_cfg[vf].vf_vlan_tag = 0;
979 adapter->vlans_added--;
980 }
981
982 status = be_vid_config(adapter, true, vf);
983
984 if (status)
985 dev_info(&adapter->pdev->dev,
986 "VLAN %d config on VF %d failed\n", vlan, vf);
987 return status;
988}
989
Ajit Khapardee1d18732010-07-23 01:52:13 +0000990static int be_set_vf_tx_rate(struct net_device *netdev,
991 int vf, int rate)
992{
993 struct be_adapter *adapter = netdev_priv(netdev);
994 int status = 0;
995
996 if (!adapter->sriov_enabled)
997 return -EPERM;
998
999 if ((vf >= num_vfs) || (rate < 0))
1000 return -EINVAL;
1001
1002 if (rate > 10000)
1003 rate = 10000;
1004
1005 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +00001006 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +00001007
1008 if (status)
1009 dev_info(&adapter->pdev->dev,
1010 "tx rate %d on VF %d failed\n", rate, vf);
1011 return status;
1012}
1013
Sathya Perla3abcded2010-10-03 22:12:27 -07001014static void be_rx_rate_update(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001015{
Sathya Perla3abcded2010-10-03 22:12:27 -07001016 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -07001017 ulong now = jiffies;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001018
Sathya Perla4097f662009-03-24 16:40:13 -07001019 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -07001020 if (time_before(now, stats->rx_jiffies)) {
1021 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -07001022 return;
1023 }
1024
1025 /* Update the rate once in two seconds */
Sathya Perla3abcded2010-10-03 22:12:27 -07001026 if ((now - stats->rx_jiffies) < 2 * HZ)
Sathya Perla4097f662009-03-24 16:40:13 -07001027 return;
1028
Sathya Perla3abcded2010-10-03 22:12:27 -07001029 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
1030 now - stats->rx_jiffies);
1031 stats->rx_jiffies = now;
1032 stats->rx_bytes_prev = stats->rx_bytes;
Sathya Perla4097f662009-03-24 16:40:13 -07001033}
1034
Sathya Perla3abcded2010-10-03 22:12:27 -07001035static void be_rx_stats_update(struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001036 struct be_rx_compl_info *rxcp)
Sathya Perla4097f662009-03-24 16:40:13 -07001037{
Sathya Perla3abcded2010-10-03 22:12:27 -07001038 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -07001039
Sathya Perla3abcded2010-10-03 22:12:27 -07001040 stats->rx_compl++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001041 stats->rx_frags += rxcp->num_rcvd;
1042 stats->rx_bytes += rxcp->pkt_size;
Sathya Perla3abcded2010-10-03 22:12:27 -07001043 stats->rx_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001044 if (rxcp->pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -07001045 stats->rx_mcast_pkts++;
Sathya Perla2e588f82011-03-11 02:49:26 +00001046 if (rxcp->err)
1047 stats->rxcp_err++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001048}
1049
Sathya Perla2e588f82011-03-11 02:49:26 +00001050static inline bool csum_passed(struct be_rx_compl_info *rxcp)
Ajit Khaparde728a9972009-04-13 15:41:22 -07001051{
Padmanabh Ratnakar19fad862011-03-07 03:08:16 +00001052 /* L4 checksum is not reliable for non TCP/UDP packets.
1053 * Also ignore ipcksm for ipv6 pkts */
Sathya Perla2e588f82011-03-11 02:49:26 +00001054 return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1055 (rxcp->ip_csum || rxcp->ipv6);
Ajit Khaparde728a9972009-04-13 15:41:22 -07001056}
1057
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001058static struct be_rx_page_info *
Sathya Perla3abcded2010-10-03 22:12:27 -07001059get_rx_page_info(struct be_adapter *adapter,
1060 struct be_rx_obj *rxo,
1061 u16 frag_idx)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001062{
1063 struct be_rx_page_info *rx_page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001064 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001065
Sathya Perla3abcded2010-10-03 22:12:27 -07001066 rx_page_info = &rxo->page_info_tbl[frag_idx];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001067 BUG_ON(!rx_page_info->page);
1068
Ajit Khaparde205859a2010-02-09 01:34:21 +00001069 if (rx_page_info->last_page_user) {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001070 dma_unmap_page(&adapter->pdev->dev,
1071 dma_unmap_addr(rx_page_info, bus),
1072 adapter->big_page_size, DMA_FROM_DEVICE);
Ajit Khaparde205859a2010-02-09 01:34:21 +00001073 rx_page_info->last_page_user = false;
1074 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001075
1076 atomic_dec(&rxq->used);
1077 return rx_page_info;
1078}
1079
1080/* Throwaway the data in the Rx completion */
1081static void be_rx_compl_discard(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001082 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001083 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001084{
Sathya Perla3abcded2010-10-03 22:12:27 -07001085 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001086 struct be_rx_page_info *page_info;
Sathya Perla2e588f82011-03-11 02:49:26 +00001087 u16 i, num_rcvd = rxcp->num_rcvd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001088
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001089 for (i = 0; i < num_rcvd; i++) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001090 page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001091 put_page(page_info->page);
1092 memset(page_info, 0, sizeof(*page_info));
Sathya Perla2e588f82011-03-11 02:49:26 +00001093 index_inc(&rxcp->rxq_idx, rxq->len);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001094 }
1095}
1096
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.  The first rx_frag_size bytes come from the first
 * fragment: up to BE_HDR_LEN of them are copied into the skb linear area,
 * the remainder is attached as a page frag.  Subsequent fragments are
 * appended as page frags, coalescing frags that share a physical page.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 i, j;
	u16 hdr_len, curr_frag_len, remaining;
	u8 *start;

	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min(BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data; the page
		 * reference is no longer needed */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* rest of the first fragment becomes page frag 0 */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	/* ownership of the page (if kept) moved to the skb */
	page_info->page = NULL;

	if (rxcp->pkt_size <= rx_frag_size) {
		BUG_ON(rxcp->num_rcvd != 1);
		return;
	}

	/* More frags present for this completion */
	index_inc(&rxcp->rxq_idx, rxq->len);
	remaining = rxcp->pkt_size - curr_frag_len;
	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* same page as the previous frag: drop its extra ref */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);
}
1172
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001173/* Process the RX completion indicated by rxcp when GRO is disabled */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001174static void be_rx_compl_process(struct be_adapter *adapter,
Sathya Perla3abcded2010-10-03 22:12:27 -07001175 struct be_rx_obj *rxo,
Sathya Perla2e588f82011-03-11 02:49:26 +00001176 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001177{
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001178 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001179 struct sk_buff *skb;
Sathya Perla89420422010-02-17 01:35:26 +00001180
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001181 skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
Sathya Perlaa058a632010-02-17 01:34:22 +00001182 if (unlikely(!skb)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001183 if (net_ratelimit())
1184 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
Sathya Perla3abcded2010-10-03 22:12:27 -07001185 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001186 return;
1187 }
1188
Sathya Perla2e588f82011-03-11 02:49:26 +00001189 skb_fill_rx_data(adapter, rxo, skb, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001190
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001191 if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
Ajit Khaparde728a9972009-04-13 15:41:22 -07001192 skb->ip_summed = CHECKSUM_UNNECESSARY;
Somnath Koturc6ce2f42010-10-25 01:11:58 +00001193 else
1194 skb_checksum_none_assert(skb);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001195
1196 skb->truesize = skb->len + sizeof(struct sk_buff);
Michał Mirosław6332c8d2011-04-07 02:43:48 +00001197 skb->protocol = eth_type_trans(skb, netdev);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001198 if (adapter->netdev->features & NETIF_F_RXHASH)
1199 skb->rxhash = rxcp->rss_hash;
1200
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001201
Sathya Perla2e588f82011-03-11 02:49:26 +00001202 if (unlikely(rxcp->vlanf)) {
Ajit Khaparde82903e42010-02-09 01:34:57 +00001203 if (!adapter->vlan_grp || adapter->vlans_added == 0) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001204 kfree_skb(skb);
1205 return;
1206 }
Somnath Kotur6709d952011-05-04 22:40:46 +00001207 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
1208 rxcp->vlan_tag);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001209 } else {
1210 netif_receive_skb(skb);
1211 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001212}
1213
/* Process the RX completion indicated by rxcp when GRO is enabled:
 * attach the received fragments directly to a napi frag-skb (no header
 * copy) and feed it to the GRO engine. */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_rx_compl_info *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj =  &rxo->rx_eq;
	u16 remaining, curr_frag_len;
	u16 i, j;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		/* no skb available: drop the completion's buffers */
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = rxcp->pkt_size;
	/* j indexes the current skb frag slot; it starts at -1 and is
	 * bumped to 0 on the first iteration (i == 0 below) */
	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* same page as the previous frag: drop its extra ref */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxcp->rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = rxcp->pkt_size;
	skb->data_len = rxcp->pkt_size;
	skb->truesize += rxcp->pkt_size;
	/* GRO path is only taken for frames whose checksums passed */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	if (adapter->netdev->features & NETIF_F_RXHASH)
		skb->rxhash = rxcp->rss_hash;

	if (likely(!rxcp->vlanf))
		napi_gro_frags(&eq_obj->napi);
	else
		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp,
				rxcp->vlan_tag);
}
1271
Sathya Perla2e588f82011-03-11 02:49:26 +00001272static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1273 struct be_eth_rx_compl *compl,
1274 struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001275{
Sathya Perla2e588f82011-03-11 02:49:26 +00001276 rxcp->pkt_size =
1277 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1278 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1279 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1280 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001281 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001282 rxcp->ip_csum =
1283 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1284 rxcp->l4_csum =
1285 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1286 rxcp->ipv6 =
1287 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1288 rxcp->rxq_idx =
1289 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1290 rxcp->num_rcvd =
1291 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1292 rxcp->pkt_type =
1293 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001294 rxcp->rss_hash =
1295 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001296 if (rxcp->vlanf) {
1297 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001298 compl);
1299 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1300 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001301 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001302}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001303
Sathya Perla2e588f82011-03-11 02:49:26 +00001304static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1305 struct be_eth_rx_compl *compl,
1306 struct be_rx_compl_info *rxcp)
1307{
1308 rxcp->pkt_size =
1309 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1310 rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1311 rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1312 rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
Padmanabh Ratnakar9ecb42f2011-03-15 14:57:09 -07001313 rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
Sathya Perla2e588f82011-03-11 02:49:26 +00001314 rxcp->ip_csum =
1315 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1316 rxcp->l4_csum =
1317 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1318 rxcp->ipv6 =
1319 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1320 rxcp->rxq_idx =
1321 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1322 rxcp->num_rcvd =
1323 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1324 rxcp->pkt_type =
1325 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
Ajit Khaparde4b972912011-04-06 18:07:43 +00001326 rxcp->rss_hash =
1327 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, rxcp);
Sathya Perla15d72182011-03-21 20:49:26 +00001328 if (rxcp->vlanf) {
1329 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
David S. Miller3c709f82011-05-11 14:26:15 -04001330 compl);
1331 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1332 compl);
Sathya Perla15d72182011-03-21 20:49:26 +00001333 }
Sathya Perla2e588f82011-03-11 02:49:26 +00001334}
1335
/* Fetch the next valid RX completion from the CQ, parse it into
 * rxo->rxcp and consume the CQ entry.  Returns NULL when no new
 * completion is available. */
static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
	struct be_rx_compl_info *rxcp = &rxo->rxcp;
	struct be_adapter *adapter = rxo->adapter;

	/* For checking the valid bit it is Ok to use either definition as the
	 * valid bit is at the same position in both v0 and v1 Rx compl */
	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
		return NULL;

	/* order the read of the valid bit before reading the rest of the
	 * DMA'ed completion entry */
	rmb();
	be_dws_le_to_cpu(compl, sizeof(*compl));

	if (adapter->be3_native)
		be_parse_rx_compl_v1(adapter, compl, rxcp);
	else
		be_parse_rx_compl_v0(adapter, compl, rxcp);

	if (rxcp->vlanf) {
		/* vlanf could be wrongly set in some cards.
		 * ignore if vtm is not set */
		/* NOTE(review): 0x400 appears to be a function-mode flag
		 * bit - confirm its meaning against the HW definitions */
		if ((adapter->function_mode & 0x400) && !rxcp->vtm)
			rxcp->vlanf = 0;

		if (!lancer_chip(adapter))
			rxcp->vlan_tag = swab16(rxcp->vlan_tag);

		/* hide the vlan from the stack when it matches the port's
		 * pvid and is not configured on this interface */
		if (((adapter->pvid & VLAN_VID_MASK) ==
		     (rxcp->vlan_tag & VLAN_VID_MASK)) &&
		    !adapter->vlan_tag[rxcp->vlan_tag])
			rxcp->vlanf = 0;
	}

	/* As the compl has been parsed, reset it; we wont touch it again */
	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

	queue_tail_inc(&rxo->cq);
	return rxcp;
}
1376
Eric Dumazet1829b082011-03-01 05:48:12 +00001377static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001378{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001379 u32 order = get_order(size);
Eric Dumazet1829b082011-03-01 05:48:12 +00001380
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001381 if (order > 0)
Eric Dumazet1829b082011-03-01 05:48:12 +00001382 gfp |= __GFP_COMP;
1383 return alloc_pages(gfp, order);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001384}
1385
1386/*
1387 * Allocate a page, split it to fragments of size rx_frag_size and post as
1388 * receive buffers to BE
1389 */
Eric Dumazet1829b082011-03-01 05:48:12 +00001390static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001391{
Sathya Perla3abcded2010-10-03 22:12:27 -07001392 struct be_adapter *adapter = rxo->adapter;
1393 struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
Sathya Perla26d92f92010-01-21 22:52:08 -08001394 struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
Sathya Perla3abcded2010-10-03 22:12:27 -07001395 struct be_queue_info *rxq = &rxo->q;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001396 struct page *pagep = NULL;
1397 struct be_eth_rx_d *rxd;
1398 u64 page_dmaaddr = 0, frag_dmaaddr;
1399 u32 posted, page_offset = 0;
1400
Sathya Perla3abcded2010-10-03 22:12:27 -07001401 page_info = &rxo->page_info_tbl[rxq->head];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001402 for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1403 if (!pagep) {
Eric Dumazet1829b082011-03-01 05:48:12 +00001404 pagep = be_alloc_pages(adapter->big_page_size, gfp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001405 if (unlikely(!pagep)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001406 rxo->stats.rx_post_fail++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001407 break;
1408 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001409 page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1410 0, adapter->big_page_size,
1411 DMA_FROM_DEVICE);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001412 page_info->page_offset = 0;
1413 } else {
1414 get_page(pagep);
1415 page_info->page_offset = page_offset + rx_frag_size;
1416 }
1417 page_offset = page_info->page_offset;
1418 page_info->page = pagep;
FUJITA Tomonorifac6da52010-04-01 16:53:22 +00001419 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001420 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1421
1422 rxd = queue_head_node(rxq);
1423 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1424 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001425
1426 /* Any space left in the current big page for another frag? */
1427 if ((page_offset + rx_frag_size + rx_frag_size) >
1428 adapter->big_page_size) {
1429 pagep = NULL;
1430 page_info->last_page_user = true;
1431 }
Sathya Perla26d92f92010-01-21 22:52:08 -08001432
1433 prev_page_info = page_info;
1434 queue_head_inc(rxq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001435 page_info = &page_info_tbl[rxq->head];
1436 }
1437 if (pagep)
Sathya Perla26d92f92010-01-21 22:52:08 -08001438 prev_page_info->last_page_user = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001439
1440 if (posted) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001441 atomic_add(posted, &rxq->used);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001442 be_rxq_notify(adapter, rxq->id, posted);
Sathya Perlaea1dae12009-03-19 23:56:20 -07001443 } else if (atomic_read(&rxq->used) == 0) {
1444 /* Let be_worker replenish when memory is available */
Sathya Perla3abcded2010-10-03 22:12:27 -07001445 rxo->rx_post_starved = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001446 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001447}
1448
Sathya Perla5fb379e2009-06-18 00:02:59 +00001449static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001450{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001451 struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1452
1453 if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1454 return NULL;
1455
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001456 rmb();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001457 be_dws_le_to_cpu(txcp, sizeof(*txcp));
1458
1459 txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1460
1461 queue_tail_inc(tx_cq);
1462 return txcp;
1463}
1464
Sathya Perla3c8def92011-06-12 20:01:58 +00001465static u16 be_tx_compl_process(struct be_adapter *adapter,
1466 struct be_tx_obj *txo, u16 last_index)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001467{
Sathya Perla3c8def92011-06-12 20:01:58 +00001468 struct be_queue_info *txq = &txo->q;
Alexander Duycka73b7962009-12-02 16:48:18 +00001469 struct be_eth_wrb *wrb;
Sathya Perla3c8def92011-06-12 20:01:58 +00001470 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001471 struct sk_buff *sent_skb;
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001472 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1473 bool unmap_skb_hdr = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001474
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001475 sent_skb = sent_skbs[txq->tail];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001476 BUG_ON(!sent_skb);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001477 sent_skbs[txq->tail] = NULL;
1478
1479 /* skip header wrb */
Alexander Duycka73b7962009-12-02 16:48:18 +00001480 queue_tail_inc(txq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001481
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001482 do {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001483 cur_index = txq->tail;
Alexander Duycka73b7962009-12-02 16:48:18 +00001484 wrb = queue_tail_node(txq);
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00001485 unmap_tx_frag(&adapter->pdev->dev, wrb,
1486 (unmap_skb_hdr && skb_headlen(sent_skb)));
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001487 unmap_skb_hdr = false;
1488
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001489 num_wrbs++;
1490 queue_tail_inc(txq);
Sathya Perlaec43b1a2010-03-22 20:41:34 +00001491 } while (cur_index != last_index);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001492
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001493 kfree_skb(sent_skb);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001494 return num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001495}
1496
Sathya Perla859b1e42009-08-10 03:43:51 +00001497static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1498{
1499 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1500
1501 if (!eqe->evt)
1502 return NULL;
1503
Sathya Perlaf3eb62d2010-06-29 00:11:17 +00001504 rmb();
Sathya Perla859b1e42009-08-10 03:43:51 +00001505 eqe->evt = le32_to_cpu(eqe->evt);
1506 queue_tail_inc(&eq_obj->q);
1507 return eqe;
1508}
1509
1510static int event_handle(struct be_adapter *adapter,
Sathya Perla3c8def92011-06-12 20:01:58 +00001511 struct be_eq_obj *eq_obj,
1512 bool rearm)
Sathya Perla859b1e42009-08-10 03:43:51 +00001513{
1514 struct be_eq_entry *eqe;
1515 u16 num = 0;
1516
1517 while ((eqe = event_get(eq_obj)) != NULL) {
1518 eqe->evt = 0;
1519 num++;
1520 }
1521
1522 /* Deal with any spurious interrupts that come
1523 * without events
1524 */
Sathya Perla3c8def92011-06-12 20:01:58 +00001525 if (!num)
1526 rearm = true;
1527
1528 be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
Sathya Perla859b1e42009-08-10 03:43:51 +00001529 if (num)
1530 napi_schedule(&eq_obj->napi);
1531
1532 return num;
1533}
1534
1535/* Just read and notify events without processing them.
1536 * Used at the time of destroying event queues */
1537static void be_eq_clean(struct be_adapter *adapter,
1538 struct be_eq_obj *eq_obj)
1539{
1540 struct be_eq_entry *eqe;
1541 u16 num = 0;
1542
1543 while ((eqe = event_get(eq_obj)) != NULL) {
1544 eqe->evt = 0;
1545 num++;
1546 }
1547
1548 if (num)
1549 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1550}
1551
Sathya Perla3abcded2010-10-03 22:12:27 -07001552static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001553{
1554 struct be_rx_page_info *page_info;
Sathya Perla3abcded2010-10-03 22:12:27 -07001555 struct be_queue_info *rxq = &rxo->q;
1556 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001557 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001558 u16 tail;
1559
1560 /* First cleanup pending rx completions */
Sathya Perla3abcded2010-10-03 22:12:27 -07001561 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1562 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001563 be_cq_notify(adapter, rx_cq->id, false, 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001564 }
1565
1566 /* Then free posted rx buffer that were not used */
1567 tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
Sathya Perlacdab23b2009-08-10 03:43:23 +00001568 for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001569 page_info = get_rx_page_info(adapter, rxo, tail);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001570 put_page(page_info->page);
1571 memset(page_info, 0, sizeof(*page_info));
1572 }
1573 BUG_ON(atomic_read(&rxq->used));
1574}
1575
Sathya Perla3c8def92011-06-12 20:01:58 +00001576static void be_tx_compl_clean(struct be_adapter *adapter,
1577 struct be_tx_obj *txo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001578{
Sathya Perla3c8def92011-06-12 20:01:58 +00001579 struct be_queue_info *tx_cq = &txo->cq;
1580 struct be_queue_info *txq = &txo->q;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001581 struct be_eth_tx_compl *txcp;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001582 u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
Sathya Perla3c8def92011-06-12 20:01:58 +00001583 struct sk_buff **sent_skbs = txo->sent_skb_list;
Sathya Perlab03388d2010-02-18 00:37:17 +00001584 struct sk_buff *sent_skb;
1585 bool dummy_wrb;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001586
Sathya Perlaa8e91792009-08-10 03:42:43 +00001587 /* Wait for a max of 200ms for all the tx-completions to arrive. */
1588 do {
1589 while ((txcp = be_tx_compl_get(tx_cq))) {
1590 end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1591 wrb_index, txcp);
Sathya Perla3c8def92011-06-12 20:01:58 +00001592 num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001593 cmpl++;
1594 }
1595 if (cmpl) {
1596 be_cq_notify(adapter, tx_cq->id, false, cmpl);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001597 atomic_sub(num_wrbs, &txq->used);
Sathya Perlaa8e91792009-08-10 03:42:43 +00001598 cmpl = 0;
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001599 num_wrbs = 0;
Sathya Perlaa8e91792009-08-10 03:42:43 +00001600 }
1601
1602 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1603 break;
1604
1605 mdelay(1);
1606 } while (true);
1607
1608 if (atomic_read(&txq->used))
1609 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1610 atomic_read(&txq->used));
Sathya Perlab03388d2010-02-18 00:37:17 +00001611
1612 /* free posted tx for which compls will never arrive */
1613 while (atomic_read(&txq->used)) {
1614 sent_skb = sent_skbs[txq->tail];
1615 end_idx = txq->tail;
1616 index_adv(&end_idx,
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001617 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1618 txq->len);
Sathya Perla3c8def92011-06-12 20:01:58 +00001619 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
Padmanabh Ratnakar4d586b82011-05-10 05:13:57 +00001620 atomic_sub(num_wrbs, &txq->used);
Sathya Perlab03388d2010-02-18 00:37:17 +00001621 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001622}
1623
Sathya Perla5fb379e2009-06-18 00:02:59 +00001624static void be_mcc_queues_destroy(struct be_adapter *adapter)
1625{
1626 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001627
Sathya Perla8788fdc2009-07-27 22:52:03 +00001628 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001629 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001630 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001631 be_queue_free(adapter, q);
1632
Sathya Perla8788fdc2009-07-27 22:52:03 +00001633 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001634 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001635 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001636 be_queue_free(adapter, q);
1637}
1638
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates and creates the MCC completion queue and the MCC queue.
 * Returns 0 on success, -1 on failure (everything created so far is
 * unwound via the goto chain below). */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Error unwind: labels run in reverse order of the steps above */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1674
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001675static void be_tx_queues_destroy(struct be_adapter *adapter)
1676{
1677 struct be_queue_info *q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001678 struct be_tx_obj *txo;
1679 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001680
Sathya Perla3c8def92011-06-12 20:01:58 +00001681 for_all_tx_queues(adapter, txo, i) {
1682 q = &txo->q;
1683 if (q->created)
1684 be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1685 be_queue_free(adapter, q);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001686
Sathya Perla3c8def92011-06-12 20:01:58 +00001687 q = &txo->cq;
1688 if (q->created)
1689 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1690 be_queue_free(adapter, q);
1691 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001692
Sathya Perla859b1e42009-08-10 03:43:51 +00001693 /* Clear any residual events */
1694 be_eq_clean(adapter, &adapter->tx_eq);
1695
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001696 q = &adapter->tx_eq.q;
1697 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001698 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001699 be_queue_free(adapter, q);
1700}
1701
Sathya Perla3c8def92011-06-12 20:01:58 +00001702/* One TX event queue is shared by all TX compl qs */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001703static int be_tx_queues_create(struct be_adapter *adapter)
1704{
1705 struct be_queue_info *eq, *q, *cq;
Sathya Perla3c8def92011-06-12 20:01:58 +00001706 struct be_tx_obj *txo;
1707 u8 i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001708
1709 adapter->tx_eq.max_eqd = 0;
1710 adapter->tx_eq.min_eqd = 0;
1711 adapter->tx_eq.cur_eqd = 96;
1712 adapter->tx_eq.enable_aic = false;
Sathya Perla3c8def92011-06-12 20:01:58 +00001713
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001714 eq = &adapter->tx_eq.q;
Sathya Perla3c8def92011-06-12 20:01:58 +00001715 if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1716 sizeof(struct be_eq_entry)))
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001717 return -1;
1718
Sathya Perla8788fdc2009-07-27 22:52:03 +00001719 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
Sathya Perla3c8def92011-06-12 20:01:58 +00001720 goto err;
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001721 adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001722
Sathya Perla3c8def92011-06-12 20:01:58 +00001723 for_all_tx_queues(adapter, txo, i) {
1724 cq = &txo->cq;
1725 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001726 sizeof(struct be_eth_tx_compl)))
Sathya Perla3c8def92011-06-12 20:01:58 +00001727 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001728
Sathya Perla3c8def92011-06-12 20:01:58 +00001729 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1730 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001731
Sathya Perla3c8def92011-06-12 20:01:58 +00001732 q = &txo->q;
1733 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1734 sizeof(struct be_eth_wrb)))
1735 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001736
Sathya Perla3c8def92011-06-12 20:01:58 +00001737 if (be_cmd_txq_create(adapter, q, cq))
1738 goto err;
1739 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001740 return 0;
1741
Sathya Perla3c8def92011-06-12 20:01:58 +00001742err:
1743 be_tx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001744 return -1;
1745}
1746
/* For every RX object destroy its RX queue, completion queue and event
 * queue (in that order), draining leftover buffers and events along the
 * way. */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}
1780
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001781static u32 be_num_rxqs_want(struct be_adapter *adapter)
1782{
1783 if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1784 !adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
1785 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1786 } else {
1787 dev_warn(&adapter->pdev->dev,
1788 "No support for multiple RX queues\n");
1789 return 1;
1790 }
1791}
1792
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001793static int be_rx_queues_create(struct be_adapter *adapter)
1794{
1795 struct be_queue_info *eq, *q, *cq;
Sathya Perla3abcded2010-10-03 22:12:27 -07001796 struct be_rx_obj *rxo;
1797 int rc, i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001798
Sathya Perlaac6a0c42011-03-21 20:49:25 +00001799 adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1800 msix_enabled(adapter) ?
1801 adapter->num_msix_vec - 1 : 1);
1802 if (adapter->num_rx_qs != MAX_RX_QS)
1803 dev_warn(&adapter->pdev->dev,
1804 "Can create only %d RX queues", adapter->num_rx_qs);
1805
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001806 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001807 for_all_rx_queues(adapter, rxo, i) {
1808 rxo->adapter = adapter;
1809 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1810 rxo->rx_eq.enable_aic = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001811
Sathya Perla3abcded2010-10-03 22:12:27 -07001812 /* EQ */
1813 eq = &rxo->rx_eq.q;
1814 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1815 sizeof(struct be_eq_entry));
1816 if (rc)
1817 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001818
Sathya Perla3abcded2010-10-03 22:12:27 -07001819 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1820 if (rc)
1821 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001822
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001823 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001824
Sathya Perla3abcded2010-10-03 22:12:27 -07001825 /* CQ */
1826 cq = &rxo->cq;
1827 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1828 sizeof(struct be_eth_rx_compl));
1829 if (rc)
1830 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001831
Sathya Perla3abcded2010-10-03 22:12:27 -07001832 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1833 if (rc)
1834 goto err;
Sathya Perla3abcded2010-10-03 22:12:27 -07001835 /* Rx Q */
1836 q = &rxo->q;
1837 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1838 sizeof(struct be_eth_rx_d));
1839 if (rc)
1840 goto err;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001841
Sathya Perla3abcded2010-10-03 22:12:27 -07001842 rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
1843 BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
1844 (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
1845 if (rc)
1846 goto err;
1847 }
1848
1849 if (be_multi_rxq(adapter)) {
1850 u8 rsstable[MAX_RSS_QS];
1851
1852 for_all_rss_queues(adapter, rxo, i)
1853 rsstable[i] = rxo->rss_id;
1854
1855 rc = be_cmd_rss_config(adapter, rsstable,
1856 adapter->num_rx_qs - 1);
1857 if (rc)
1858 goto err;
1859 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001860
1861 return 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07001862err:
1863 be_rx_queues_destroy(adapter);
1864 return -1;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001865}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001866
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001867static bool event_peek(struct be_eq_obj *eq_obj)
Sathya Perlab628bde2009-08-17 00:58:26 +00001868{
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001869 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1870 if (!eqe->evt)
1871 return false;
1872 else
1873 return true;
Sathya Perlab628bde2009-08-17 00:58:26 +00001874}
1875
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876static irqreturn_t be_intx(int irq, void *dev)
1877{
1878 struct be_adapter *adapter = dev;
Sathya Perla3abcded2010-10-03 22:12:27 -07001879 struct be_rx_obj *rxo;
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001880 int isr, i, tx = 0 , rx = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001881
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001882 if (lancer_chip(adapter)) {
1883 if (event_peek(&adapter->tx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001884 tx = event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001885 for_all_rx_queues(adapter, rxo, i) {
1886 if (event_peek(&rxo->rx_eq))
Sathya Perla3c8def92011-06-12 20:01:58 +00001887 rx |= event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001888 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001889
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001890 if (!(tx || rx))
1891 return IRQ_NONE;
Sathya Perla3abcded2010-10-03 22:12:27 -07001892
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001893 } else {
1894 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1895 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1896 if (!isr)
1897 return IRQ_NONE;
1898
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001899 if ((1 << adapter->tx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001900 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001901
1902 for_all_rx_queues(adapter, rxo, i) {
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00001903 if ((1 << rxo->rx_eq.eq_idx & isr))
Sathya Perla3c8def92011-06-12 20:01:58 +00001904 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00001905 }
Sathya Perla3abcded2010-10-03 22:12:27 -07001906 }
Sathya Perlac001c212009-07-01 01:06:07 +00001907
Sathya Perla8788fdc2009-07-27 22:52:03 +00001908 return IRQ_HANDLED;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001909}
1910
1911static irqreturn_t be_msix_rx(int irq, void *dev)
1912{
Sathya Perla3abcded2010-10-03 22:12:27 -07001913 struct be_rx_obj *rxo = dev;
1914 struct be_adapter *adapter = rxo->adapter;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001915
Sathya Perla3c8def92011-06-12 20:01:58 +00001916 event_handle(adapter, &rxo->rx_eq, true);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001917
1918 return IRQ_HANDLED;
1919}
1920
Sathya Perla5fb379e2009-06-18 00:02:59 +00001921static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001922{
1923 struct be_adapter *adapter = dev;
1924
Sathya Perla3c8def92011-06-12 20:01:58 +00001925 event_handle(adapter, &adapter->tx_eq, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001926
1927 return IRQ_HANDLED;
1928}
1929
Sathya Perla2e588f82011-03-11 02:49:26 +00001930static inline bool do_gro(struct be_rx_compl_info *rxcp)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001931{
Sathya Perla2e588f82011-03-11 02:49:26 +00001932 return (rxcp->tcpf && !rxcp->err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001933}
1934
stephen hemminger49b05222010-10-21 07:50:48 +00001935static int be_poll_rx(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001936{
1937 struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
Sathya Perla3abcded2010-10-03 22:12:27 -07001938 struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1939 struct be_adapter *adapter = rxo->adapter;
1940 struct be_queue_info *rx_cq = &rxo->cq;
Sathya Perla2e588f82011-03-11 02:49:26 +00001941 struct be_rx_compl_info *rxcp;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001942 u32 work_done;
1943
Sathya Perla3abcded2010-10-03 22:12:27 -07001944 rxo->stats.rx_polls++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001945 for (work_done = 0; work_done < budget; work_done++) {
Sathya Perla3abcded2010-10-03 22:12:27 -07001946 rxcp = be_rx_compl_get(rxo);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001947 if (!rxcp)
1948 break;
1949
Padmanabh Ratnakare80d9da2011-03-07 03:07:58 +00001950 /* Ignore flush completions */
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001951 if (rxcp->num_rcvd && rxcp->pkt_size) {
Sathya Perla2e588f82011-03-11 02:49:26 +00001952 if (do_gro(rxcp))
Sathya Perla64642812010-12-01 01:04:17 +00001953 be_rx_compl_process_gro(adapter, rxo, rxcp);
1954 else
1955 be_rx_compl_process(adapter, rxo, rxcp);
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001956 } else if (rxcp->pkt_size == 0) {
1957 be_rx_compl_discard(adapter, rxo, rxcp);
Sathya Perla64642812010-12-01 01:04:17 +00001958 }
Padmanabh Ratnakar009dd872011-05-10 05:12:17 +00001959
Sathya Perla2e588f82011-03-11 02:49:26 +00001960 be_rx_stats_update(rxo, rxcp);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001961 }
1962
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001963 /* Refill the queue */
Sathya Perla3abcded2010-10-03 22:12:27 -07001964 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
Eric Dumazet1829b082011-03-01 05:48:12 +00001965 be_post_rx_frags(rxo, GFP_ATOMIC);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001966
1967 /* All consumed */
1968 if (work_done < budget) {
1969 napi_complete(napi);
Sathya Perla8788fdc2009-07-27 22:52:03 +00001970 be_cq_notify(adapter, rx_cq->id, true, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001971 } else {
1972 /* More to be consumed; continue with interrupts disabled */
Sathya Perla8788fdc2009-07-27 22:52:03 +00001973 be_cq_notify(adapter, rx_cq->id, false, work_done);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001974 }
1975 return work_done;
1976}
1977
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001978/* As TX and MCC share the same EQ check for both TX and MCC completions.
1979 * For TX/MCC we don't honour budget; consume everything
1980 */
1981static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001982{
Sathya Perlaf31e50a2010-03-02 03:56:39 -08001983 struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1984 struct be_adapter *adapter =
1985 container_of(tx_eq, struct be_adapter, tx_eq);
Sathya Perla3c8def92011-06-12 20:01:58 +00001986 struct be_tx_obj *txo;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001987 struct be_eth_tx_compl *txcp;
Sathya Perla3c8def92011-06-12 20:01:58 +00001988 int tx_compl, mcc_compl, status = 0;
1989 u8 i;
1990 u16 num_wrbs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001991
Sathya Perla3c8def92011-06-12 20:01:58 +00001992 for_all_tx_queues(adapter, txo, i) {
1993 tx_compl = 0;
1994 num_wrbs = 0;
1995 while ((txcp = be_tx_compl_get(&txo->cq))) {
1996 num_wrbs += be_tx_compl_process(adapter, txo,
1997 AMAP_GET_BITS(struct amap_eth_tx_compl,
1998 wrb_index, txcp));
1999 tx_compl++;
2000 }
2001 if (tx_compl) {
2002 be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2003
2004 atomic_sub(num_wrbs, &txo->q.used);
2005
2006 /* As Tx wrbs have been freed up, wake up netdev queue
2007 * if it was stopped due to lack of tx wrbs. */
2008 if (__netif_subqueue_stopped(adapter->netdev, i) &&
2009 atomic_read(&txo->q.used) < txo->q.len / 2) {
2010 netif_wake_subqueue(adapter->netdev, i);
2011 }
2012
2013 adapter->drv_stats.be_tx_events++;
2014 txo->stats.be_tx_compl += tx_compl;
2015 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002016 }
2017
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002018 mcc_compl = be_process_mcc(adapter, &status);
2019
Sathya Perlaf31e50a2010-03-02 03:56:39 -08002020 if (mcc_compl) {
2021 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2022 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2023 }
2024
Sathya Perla3c8def92011-06-12 20:01:58 +00002025 napi_complete(napi);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002026
Sathya Perla3c8def92011-06-12 20:01:58 +00002027 be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002028 return 1;
2029}
2030
Ajit Khaparded053de92010-09-03 06:23:30 +00002031void be_detect_dump_ue(struct be_adapter *adapter)
Ajit Khaparde7c185272010-07-29 06:16:33 +00002032{
2033 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
2034 u32 i;
2035
2036 pci_read_config_dword(adapter->pdev,
2037 PCICFG_UE_STATUS_LOW, &ue_status_lo);
2038 pci_read_config_dword(adapter->pdev,
2039 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
2040 pci_read_config_dword(adapter->pdev,
2041 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
2042 pci_read_config_dword(adapter->pdev,
2043 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
2044
2045 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
2046 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
2047
Ajit Khaparded053de92010-09-03 06:23:30 +00002048 if (ue_status_lo || ue_status_hi) {
2049 adapter->ue_detected = true;
Ajit Khaparde7acc2082011-02-11 13:38:17 +00002050 adapter->eeh_err = true;
Ajit Khaparded053de92010-09-03 06:23:30 +00002051 dev_err(&adapter->pdev->dev, "UE Detected!!\n");
2052 }
2053
Ajit Khaparde7c185272010-07-29 06:16:33 +00002054 if (ue_status_lo) {
2055 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
2056 if (ue_status_lo & 1)
2057 dev_err(&adapter->pdev->dev,
2058 "UE: %s bit set\n", ue_status_low_desc[i]);
2059 }
2060 }
2061 if (ue_status_hi) {
2062 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
2063 if (ue_status_hi & 1)
2064 dev_err(&adapter->pdev->dev,
2065 "UE: %s bit set\n", ue_status_hi_desc[i]);
2066 }
2067 }
2068
2069}
2070
/* Periodic housekeeping, rescheduled every second.
 * Checks for unrecoverable errors, reaps MCC completions when the
 * interface is down, refreshes stats and tx/rx rate estimates, and
 * replenishes rx queues that starved for buffers.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	int i;

	/* UE registers exist only on BE-family chips; once a UE has been
	 * detected there is no point re-reading them */
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

	/* when interrupts are not yet enabled, just reap any pending
	* mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			/* rearm=false: interrupts stay disabled */
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		goto reschedule;
	}

	/* Fire a new stats request only if the previous one completed */
	if (!adapter->stats_cmd_sent) {
		if (lancer_chip(adapter))
			lancer_cmd_get_pport_stats(adapter,
						&adapter->stats_cmd);
		else
			be_cmd_get_stats(adapter, &adapter->stats_cmd);
	}

	for_all_tx_queues(adapter, txo, i)
		be_tx_rate_update(txo);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		/* refill a queue that ran out of rx buffers earlier */
		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}

reschedule:
	adapter->work_counter++;
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
2122
Sathya Perla8d56ff12009-11-22 22:02:26 +00002123static void be_msix_disable(struct be_adapter *adapter)
2124{
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002125 if (msix_enabled(adapter)) {
Sathya Perla8d56ff12009-11-22 22:02:26 +00002126 pci_disable_msix(adapter->pdev);
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002127 adapter->num_msix_vec = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002128 }
2129}
2130
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002131static void be_msix_enable(struct be_adapter *adapter)
2132{
Sathya Perla3abcded2010-10-03 22:12:27 -07002133#define BE_MIN_MSIX_VECTORS (1 + 1) /* Rx + Tx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002134 int i, status, num_vec;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002135
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002136 num_vec = be_num_rxqs_want(adapter) + 1;
Sathya Perla3abcded2010-10-03 22:12:27 -07002137
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002138 for (i = 0; i < num_vec; i++)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002139 adapter->msix_entries[i].entry = i;
2140
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002141 status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
Sathya Perla3abcded2010-10-03 22:12:27 -07002142 if (status == 0) {
2143 goto done;
2144 } else if (status >= BE_MIN_MSIX_VECTORS) {
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002145 num_vec = status;
Sathya Perla3abcded2010-10-03 22:12:27 -07002146 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002147 num_vec) == 0)
Sathya Perla3abcded2010-10-03 22:12:27 -07002148 goto done;
Sathya Perla3abcded2010-10-03 22:12:27 -07002149 }
2150 return;
2151done:
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002152 adapter->num_msix_vec = num_vec;
2153 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002154}
2155
/* Enable SR-IOV on the PF if the num_vfs module parameter requests it.
 * num_vfs is clamped to the TotalVFs value advertised by the device's
 * SR-IOV capability.  adapter->sriov_enabled records the outcome.
 *
 * Fix: the return value of pci_find_ext_capability() was not checked.
 * If the device lacked an SR-IOV capability, pos was 0 and the
 * subsequent config read fetched garbage into an uninitialized nvfs.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status, pos;
		u16 nvfs = 0;

		pos = pci_find_ext_capability(adapter->pdev,
						PCI_EXT_CAP_ID_SRIOV);
		if (!pos)
			return;	/* no SR-IOV capability on this device */

		pci_read_config_word(adapter->pdev,
					pos + PCI_SRIOV_TOTAL_VF, &nvfs);

		/* clamp the request to what the device supports */
		if (num_vfs > nvfs) {
			dev_info(&adapter->pdev->dev,
					"Device supports %d VFs and not %d\n",
					nvfs, num_vfs);
			num_vfs = nvfs;
		}

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
2181
/* Tear down SR-IOV if it was enabled by be_sriov_enable(). */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
2191
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002192static inline int be_msix_vec_get(struct be_adapter *adapter,
2193 struct be_eq_obj *eq_obj)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002194{
Padmanabh Ratnakarecd62102011-04-03 01:54:11 +00002195 return adapter->msix_entries[eq_obj->eq_idx].vector;
Sathya Perlab628bde2009-08-17 00:58:26 +00002196}
2197
2198static int be_request_irq(struct be_adapter *adapter,
2199 struct be_eq_obj *eq_obj,
Sathya Perla3abcded2010-10-03 22:12:27 -07002200 void *handler, char *desc, void *context)
Sathya Perlab628bde2009-08-17 00:58:26 +00002201{
2202 struct net_device *netdev = adapter->netdev;
2203 int vec;
2204
2205 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00002206 vec = be_msix_vec_get(adapter, eq_obj);
Sathya Perla3abcded2010-10-03 22:12:27 -07002207 return request_irq(vec, handler, 0, eq_obj->desc, context);
Sathya Perlab628bde2009-08-17 00:58:26 +00002208}
2209
/* Free the MSI-X irq that was requested for this event queue. */
static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	free_irq(be_msix_vec_get(adapter, eq_obj), context);
}
2216
/* Request all MSI-X irqs: the shared tx/mcc vector first, then one
 * vector per rx queue.  On any failure, every irq requested so far is
 * freed and MSI-X is disabled; returns 0 on success or the error code.
 */
static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	/* unwind: i is the index of the rx queue whose request failed,
	 * so free irqs for queues [0, i-1] in reverse order */
	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	be_msix_disable(adapter);
	return status;
}
2250
2251static int be_irq_register(struct be_adapter *adapter)
2252{
2253 struct net_device *netdev = adapter->netdev;
2254 int status;
2255
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002256 if (msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002257 status = be_msix_register(adapter);
2258 if (status == 0)
2259 goto done;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002260 /* INTx is not supported for VF */
2261 if (!be_physfn(adapter))
2262 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002263 }
2264
2265 /* INTx */
2266 netdev->irq = adapter->pdev->irq;
2267 status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2268 adapter);
2269 if (status) {
2270 dev_err(&adapter->pdev->dev,
2271 "INTx request IRQ failed - err %d\n", status);
2272 return status;
2273 }
2274done:
2275 adapter->isr_registered = true;
2276 return 0;
2277}
2278
2279static void be_irq_unregister(struct be_adapter *adapter)
2280{
2281 struct net_device *netdev = adapter->netdev;
Sathya Perla3abcded2010-10-03 22:12:27 -07002282 struct be_rx_obj *rxo;
2283 int i;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002284
2285 if (!adapter->isr_registered)
2286 return;
2287
2288 /* INTx */
Sathya Perlaac6a0c42011-03-21 20:49:25 +00002289 if (!msix_enabled(adapter)) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002290 free_irq(netdev->irq, adapter);
2291 goto done;
2292 }
2293
2294 /* MSIx */
Sathya Perla3abcded2010-10-03 22:12:27 -07002295 be_free_irq(adapter, &adapter->tx_eq, adapter);
2296
2297 for_all_rx_queues(adapter, rxo, i)
2298 be_free_irq(adapter, &rxo->rx_eq, rxo);
2299
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002300done:
2301 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002302}
2303
/* ndo_stop handler: quiesce the interface.
 * Ordering matters here: async MCC is disabled first, then the carrier
 * is dropped, hw interrupts are masked, napi contexts are stopped,
 * irqs are synchronized and unregistered, and finally pending tx
 * completions are drained so all tx skbs are freed.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_tx_obj *txo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_carrier_off(netdev);
	adapter->link_up = false;

	/* Lancer has no INTR_EN register; skip masking there */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* On Lancer, disarm (rearm=false) every completion queue */
	if (lancer_chip(adapter)) {
		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
		for_all_rx_queues(adapter, rxo, i)
			be_cq_notify(adapter, rxo->cq.id, false, 0);
		for_all_tx_queues(adapter, txo, i)
			be_cq_notify(adapter, txo->cq.id, false, 0);
	}

	/* Wait for any in-flight irq handlers before freeing the irqs */
	if (msix_enabled(adapter)) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	for_all_tx_queues(adapter, txo, i)
		be_tx_compl_clean(adapter, txo);

	return 0;
}
2354
/* ndo_open handler: bring the interface up.
 * Posts rx buffers, enables napi, registers and unmasks interrupts,
 * arms the event/completion queues, enables async MCC processing, and
 * then queries link state and applies PF-only configuration.  On any
 * command failure the interface is torn back down via be_close().
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	/* Give the hw rx buffers before enabling napi */
	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	/* Lancer has no INTR_EN register; skip unmasking there */
	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed, 0);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	/* vlan and flow-control configuration is a PF privilege */
	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2408
/* Enable or disable magic-packet Wake-on-LAN.
 * Enabling programs the netdev MAC as the wake filter and arms PCI
 * wake for D3hot/D3cold; disabling programs an all-zero MAC and
 * disarms wake.  The DMA'able command buffer is allocated and freed
 * locally.  Returns 0 on success, -1 on allocation failure, or a
 * command status code.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	/* zero MAC is used to clear the wake filter on disable */
	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		/* enable PM in config space before arming the wake filter */
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2447
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002448/*
2449 * Generate a seed MAC address from the PF MAC Address using jhash.
2450 * MAC Address for VFs are assigned incrementally starting from the seed.
2451 * These addresses are programmed in the ASIC by the PF and the VF driver
2452 * queries for the MAC address during its probe.
2453 */
2454static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2455{
2456 u32 vf = 0;
Sathya Perla3abcded2010-10-03 22:12:27 -07002457 int status = 0;
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002458 u8 mac[ETH_ALEN];
2459
2460 be_vf_eth_addr_generate(adapter, mac);
2461
2462 for (vf = 0; vf < num_vfs; vf++) {
2463 status = be_cmd_pmac_add(adapter, mac,
2464 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002465 &adapter->vf_cfg[vf].vf_pmac_id,
2466 vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002467 if (status)
2468 dev_err(&adapter->pdev->dev,
2469 "Mac address add failed for VF %d\n", vf);
2470 else
2471 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2472
2473 mac[5] += 1;
2474 }
2475 return status;
2476}
2477
2478static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
2479{
2480 u32 vf;
2481
2482 for (vf = 0; vf < num_vfs; vf++) {
2483 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
2484 be_cmd_pmac_del(adapter,
2485 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +00002486 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde6d87f5c2010-08-25 00:32:33 +00002487 }
2488}
2489
/* One-time per-probe setup: create the hw interface(s) and all queues.
 * The PF additionally creates one interface per enabled VF; a VF
 * instead queries its MAC from the PF.  Errors unwind in reverse
 * creation order through the goto chain at the bottom.
 */
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
				BE_IF_FLAGS_BROADCAST |
				BE_IF_FLAGS_MULTICAST;

	/* promiscuous/RSS capabilities are PF-only */
	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			/* create an interface per VF; the mac passed here is
			 * not programmed (pmac_invalid == true) */
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf+1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
					BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		/* VF: fetch the MAC the PF assigned to this interface */
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	/* force the first link-status event to be reported */
	adapter->link_speed = -1;

	return 0;

	/* error unwind: destroy in reverse order of creation */
rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}
2579
/* Undo be_setup(): remove VF MACs, destroy all queues and interfaces
 * (VF interfaces first, then the PF/own interface), and tell fw no
 * more commands will be issued.  Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);
	/* reset eq allocation so a later be_setup() starts fresh */
	adapter->eq_next_idx = 0;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2605
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002606
Ajit Khaparde84517482009-09-04 03:12:16 +00002607#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002608static bool be_flash_redboot(struct be_adapter *adapter,
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002609 const u8 *p, u32 img_start, int image_size,
2610 int hdr_size)
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002611{
2612 u32 crc_offset;
2613 u8 flashed_crc[4];
2614 int status;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002615
2616 crc_offset = hdr_size + img_start + image_size - 4;
2617
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002618 p += crc_offset;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002619
2620 status = be_cmd_get_flash_crc(adapter, flashed_crc,
Ajit Khapardef510fc62010-03-31 01:47:45 +00002621 (image_size - 4));
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002622 if (status) {
2623 dev_err(&adapter->pdev->dev,
2624 "could not get crc from flash, not flashing redboot\n");
2625 return false;
2626 }
2627
2628 /*update redboot only if crc does not match*/
2629 if (!memcmp(flashed_crc, p, 4))
2630 return false;
2631 else
2632 return true;
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002633}
2634
/* Flash every firmware component found in the UFI file.
 * The per-generation tables below give, for each component: its offset
 * within the file payload, its flash optype, and its maximum size.
 * Each component is streamed to the hw in 32KB chunks; the final chunk
 * uses FLASHROM_OPER_FLASH to commit, earlier ones FLASHROM_OPER_SAVE.
 * Returns 0 on success, -1 on a bounds or command failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	/* pick the component table and file-header size for this chip gen */
	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI fw is only flashed on sufficiently new base fw */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* redboot is flashed only when its CRC differs */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		/* bounds-check the component against the file size */
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			/* last chunk commits; earlier chunks only stage */
			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
		}
	}
	return 0;
}
2735
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002736static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2737{
2738 if (fhdr == NULL)
2739 return 0;
2740 if (fhdr->build[0] == '3')
2741 return BE_GEN3;
2742 else if (fhdr->build[0] == '2')
2743 return BE_GEN2;
2744 else
2745 return 0;
2746}
2747
/* Download a firmware image to a Lancer chip.
 * The image must be 4-byte aligned.  It is streamed in 32KB chunks via
 * a single DMA'able command buffer into the "/prg" flash object, then
 * committed with a zero-length write.  Returns 0 on success or a
 * negative errno / command status.
 */
static int lancer_fw_download(struct be_adapter *adapter,
				const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
	struct be_dma_mem flash_cmd;
	const u8 *data_ptr = NULL;
	u8 *dest_image_ptr = NULL;
	size_t image_size = 0;
	u32 chunk_size = 0;
	u32 data_written = 0;
	u32 offset = 0;
	int status = 0;
	u8 add_status = 0;

	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
		dev_err(&adapter->pdev->dev,
			"FW Image not properly aligned. "
			"Length must be 4 byte aligned.\n");
		status = -EINVAL;
		goto lancer_fw_exit;
	}

	/* one buffer holds the request header plus a full data chunk */
	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
				+ LANCER_FW_DOWNLOAD_CHUNK;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
						&flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto lancer_fw_exit;
	}

	/* image data goes right after the request header in the buffer */
	dest_image_ptr = flash_cmd.va +
				sizeof(struct lancer_cmd_req_write_object);
	image_size = fw->size;
	data_ptr = fw->data;

	while (image_size) {
		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

		/* Copy the image chunk content. */
		memcpy(dest_image_ptr, data_ptr, chunk_size);

		status = lancer_cmd_write_object(adapter, &flash_cmd,
				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);

		if (status)
			break;

		/* advance by what the fw actually consumed */
		offset += data_written;
		data_ptr += data_written;
		image_size -= data_written;
	}

	if (!status) {
		/* Commit the FW written */
		status = lancer_cmd_write_object(adapter, &flash_cmd,
				0, offset, LANCER_FW_DOWNLOAD_LOCATION,
				&data_written, &add_status);
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"Firmware load error. "
			"Status code: 0x%x Additional Status: 0x%x\n",
			status, add_status);
		goto lancer_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
	return status;
}
2826
/* Flash a UFI firmware image on BE2/BE3 adapters.
 *
 * A Gen3 UFI carries a table of image headers; the image whose id is 1
 * is flashed with the header count passed along.  A Gen2 UFI is flashed
 * directly.  The UFI generation (decoded from the file header) must
 * match the adapter generation, otherwise the flash is refused.
 */
static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
{
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	const u8 *p;
	int status = 0, i = 0, num_imgs = 0;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;

	/* DMA buffer for the write-flashrom cmd: header + 32KB of payload */
	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto be_fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			/* Image headers follow the g3 file header back-to-back */
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto be_fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
	return status;
}
2882
2883int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
2884{
2885 const struct firmware *fw;
2886 int status;
2887
2888 if (!netif_running(adapter->netdev)) {
2889 dev_err(&adapter->pdev->dev,
2890 "Firmware load not allowed (interface is down)\n");
2891 return -1;
2892 }
2893
2894 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2895 if (status)
2896 goto fw_exit;
2897
2898 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2899
2900 if (lancer_chip(adapter))
2901 status = lancer_fw_download(adapter, fw);
2902 else
2903 status = be_fw_download(adapter, fw);
2904
Ajit Khaparde84517482009-09-04 03:12:16 +00002905fw_exit:
2906 release_firmware(fw);
2907 return status;
2908}
2909
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002910static struct net_device_ops be_netdev_ops = {
2911 .ndo_open = be_open,
2912 .ndo_stop = be_close,
2913 .ndo_start_xmit = be_xmit,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002914 .ndo_set_rx_mode = be_set_multicast_list,
2915 .ndo_set_mac_address = be_mac_addr_set,
2916 .ndo_change_mtu = be_change_mtu,
2917 .ndo_validate_addr = eth_validate_addr,
2918 .ndo_vlan_rx_register = be_vlan_register,
2919 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2920 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002921 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002922 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002923 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002924 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002925};
2926
/* One-time netdev setup: feature flags, flow-control defaults, ops and
 * ethtool hooks, and NAPI contexts for every rx queue plus the tx/mcc
 * event queue.
 */
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	/* Features the user may toggle via ethtool */
	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_HW_VLAN_TX;
	if (be_multi_rxq(adapter))
		netdev->hw_features |= NETIF_F_RXHASH;

	/* All toggleable features are also enabled by default */
	netdev->features |= netdev->hw_features |
		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	netdev->flags |= IFF_MULTICAST;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	/* tx and mcc completions are polled from a single NAPI context */
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}
2964
2965static void be_unmap_pci_bars(struct be_adapter *adapter)
2966{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002967 if (adapter->csr)
2968 iounmap(adapter->csr);
2969 if (adapter->db)
2970 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002971 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002972 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002973}
2974
/* ioremap the PCI BARs used by this driver.
 *
 * Lancer: only BAR 0 (doorbells) is mapped.
 * BE2/BE3: the PF maps the CSR BAR (2); the pcicfg and doorbell BAR
 * numbers depend on generation and function type.  On a VF, pcicfg is
 * not a BAR of its own but a fixed offset inside the doorbell mapping.
 * Returns 0 on success, -ENOMEM on any ioremap failure (after
 * unwinding earlier mappings).
 */
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	/* BAR layout differs between BE2 and BE3/virtual functions */
	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}
3028
3029
3030static void be_ctrl_cleanup(struct be_adapter *adapter)
3031{
Sathya Perla8788fdc2009-07-27 22:52:03 +00003032 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003033
3034 be_unmap_pci_bars(adapter);
3035
3036 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003037 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3038 mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00003039
3040 mem = &adapter->mc_cmd_mem;
3041 if (mem->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003042 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3043 mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003044}
3045
/* Map the PCI BARs and allocate the DMA memory that the cmd layer
 * needs: the fw mailbox (16-byte aligned) and the multicast-config cmd
 * buffer.  Also initializes the mbox/MCC locks and the flash completion.
 * Unwinds with gotos on failure; returns 0 or a negative errno.
 */
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	/* Over-allocate by 16 bytes so the mailbox can be 16-byte aligned */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	/* mbox_mem is an aligned view into mbox_mem_alloced; only the
	 * latter is ever freed
	 */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	/* Save config space so it can be restored after an EEH reset */
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}
3100
3101static void be_stats_cleanup(struct be_adapter *adapter)
3102{
Sathya Perla3abcded2010-10-03 22:12:27 -07003103 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003104
3105 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003106 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3107 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003108}
3109
3110static int be_stats_init(struct be_adapter *adapter)
3111{
Sathya Perla3abcded2010-10-03 22:12:27 -07003112 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003113
Selvin Xavier005d5692011-05-16 07:36:35 +00003114 if (adapter->generation == BE_GEN2) {
Ajit Khaparde89a88ab2011-05-16 07:36:18 +00003115 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
Selvin Xavier005d5692011-05-16 07:36:35 +00003116 } else {
3117 if (lancer_chip(adapter))
3118 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3119 else
3120 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3121 }
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003122 cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3123 GFP_KERNEL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003124 if (cmd->va == NULL)
3125 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08003126 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003127 return 0;
3128}
3129
/* PCI remove: tear down in (roughly) reverse order of be_probe().
 * The delayed worker must be cancelled before the netdev and rings go
 * away, since it touches both.
 */
static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to do if probe never attached drvdata */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	kfree(adapter->vf_cfg);
	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	/* Frees the adapter too (it is the netdev's private area) */
	free_netdev(adapter->netdev);
}
3158
/* Query static configuration from the fw: fw version, port number and
 * function mode/caps, the permanent MAC address (PF, or any function on
 * Lancer), max vlan count and controller attributes.  Also decides how
 * many tx queues this function gets.
 * Returns 0 or the first failing cmd's status.
 */
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	/* A default permanent address is given to each VF for Lancer*/
	if (be_physfn(adapter) || lancer_chip(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* NOTE(review): 0x400 appears to be a multi-channel/FLEX10 mode
	 * bit in function_mode (fewer vlans per function in that mode) —
	 * confirm against the function_mode definitions and name it.
	 */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	be_cmd_check_native_mode(adapter);

	/* Multiple tx queues only on a non-Lancer PF with SR-IOV off and
	 * not in the 0x400 mode; everyone else gets a single tx queue.
	 */
	if ((num_vfs && adapter->sriov_enabled) ||
		(adapter->function_mode & 0x400) ||
		lancer_chip(adapter) || !be_physfn(adapter)) {
		adapter->num_tx_qs = 1;
		netif_set_real_num_tx_queues(adapter->netdev,
			adapter->num_tx_qs);
	} else {
		adapter->num_tx_qs = MAX_TX_QS;
	}

	return 0;
}
3213
/* Determine the adapter generation from the PCI device id.  For the
 * SLI-capable ids the SLI_INTF register is validated and the SLI
 * family recorded as well.
 * Returns -EINVAL when the SLI interface register is invalid, else 0.
 */
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
	case OC_DEVICE_ID4:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
						SLI_INTF_IF_TYPE_SHIFT;

		/* Register must carry the valid signature and if-type 2 */
		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
			if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					 SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		/* Unknown id: generation 0 means "unknown", not an error */
		adapter->generation = 0;
	}
	return 0;
}
3248
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003249static int lancer_wait_ready(struct be_adapter *adapter)
3250{
3251#define SLIPORT_READY_TIMEOUT 500
3252 u32 sliport_status;
3253 int status = 0, i;
3254
3255 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3256 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3257 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3258 break;
3259
3260 msleep(20);
3261 }
3262
3263 if (i == SLIPORT_READY_TIMEOUT)
3264 status = -1;
3265
3266 return status;
3267}
3268
/* Wait for the Lancer SLIPORT to become ready; if the port reports an
 * error together with "reset needed", trigger the port-initialization
 * reset and wait for readiness again.
 * Returns 0 when the port is usable, -1 when it could not recover.
 */
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			/* err without reset-needed (or vice versa) is not
			 * recoverable from here
			 */
			status = -1;
		}
	}
	return status;
}
3296
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003297static int __devinit be_probe(struct pci_dev *pdev,
3298 const struct pci_device_id *pdev_id)
3299{
3300 int status = 0;
3301 struct be_adapter *adapter;
3302 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003303
3304 status = pci_enable_device(pdev);
3305 if (status)
3306 goto do_none;
3307
3308 status = pci_request_regions(pdev, DRV_NAME);
3309 if (status)
3310 goto disable_dev;
3311 pci_set_master(pdev);
3312
Sathya Perla3c8def92011-06-12 20:01:58 +00003313 netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003314 if (netdev == NULL) {
3315 status = -ENOMEM;
3316 goto rel_reg;
3317 }
3318 adapter = netdev_priv(netdev);
3319 adapter->pdev = pdev;
3320 pci_set_drvdata(pdev, adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003321
3322 status = be_dev_family_check(adapter);
Sathya Perla63657b92010-12-01 01:02:28 +00003323 if (status)
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003324 goto free_netdev;
3325
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003326 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003327 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003328
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003329 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003330 if (!status) {
3331 netdev->features |= NETIF_F_HIGHDMA;
3332 } else {
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00003333 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003334 if (status) {
3335 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3336 goto free_netdev;
3337 }
3338 }
3339
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003340 be_sriov_enable(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003341 if (adapter->sriov_enabled) {
3342 adapter->vf_cfg = kcalloc(num_vfs,
3343 sizeof(struct be_vf_cfg), GFP_KERNEL);
3344
3345 if (!adapter->vf_cfg)
3346 goto free_netdev;
3347 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003348
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003349 status = be_ctrl_init(adapter);
3350 if (status)
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003351 goto free_vf_cfg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003352
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003353 if (lancer_chip(adapter)) {
3354 status = lancer_test_and_set_rdy_state(adapter);
3355 if (status) {
3356 dev_err(&pdev->dev, "Adapter in non recoverable error\n");
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003357 goto ctrl_clean;
Padmanabh Ratnakar37eed1c2011-03-07 03:08:36 +00003358 }
3359 }
3360
Sathya Perla2243e2e2009-11-22 22:02:03 +00003361 /* sync up with fw's ready state */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003362 if (be_physfn(adapter)) {
3363 status = be_cmd_POST(adapter);
3364 if (status)
3365 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003366 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00003367
3368 /* tell fw we're ready to fire cmds */
3369 status = be_cmd_fw_init(adapter);
3370 if (status)
3371 goto ctrl_clean;
3372
Ajit Khapardea4b4dfa2011-02-11 13:36:57 +00003373 status = be_cmd_reset_function(adapter);
3374 if (status)
3375 goto ctrl_clean;
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07003376
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003377 status = be_stats_init(adapter);
3378 if (status)
3379 goto ctrl_clean;
3380
Sathya Perla2243e2e2009-11-22 22:02:03 +00003381 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003382 if (status)
3383 goto stats_clean;
3384
Sathya Perla3abcded2010-10-03 22:12:27 -07003385 be_msix_enable(adapter);
3386
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003387 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003388
Sathya Perla5fb379e2009-06-18 00:02:59 +00003389 status = be_setup(adapter);
3390 if (status)
Sathya Perla3abcded2010-10-03 22:12:27 -07003391 goto msix_disable;
Sathya Perla2243e2e2009-11-22 22:02:03 +00003392
Sathya Perla3abcded2010-10-03 22:12:27 -07003393 be_netdev_init(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003394 status = register_netdev(netdev);
3395 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00003396 goto unsetup;
Somnath Kotur63a76942010-10-25 01:11:10 +00003397 netif_carrier_off(netdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003398
Ajit Khapardee6319362011-02-11 13:35:41 +00003399 if (be_physfn(adapter) && adapter->sriov_enabled) {
Ajit Khaparded0381c42011-04-19 12:11:55 +00003400 u8 mac_speed;
3401 bool link_up;
3402 u16 vf, lnk_speed;
3403
Mammatha Edhala12f4d0a2011-05-18 03:26:22 +00003404 if (!lancer_chip(adapter)) {
3405 status = be_vf_eth_addr_config(adapter);
3406 if (status)
3407 goto unreg_netdev;
3408 }
Ajit Khaparded0381c42011-04-19 12:11:55 +00003409
3410 for (vf = 0; vf < num_vfs; vf++) {
3411 status = be_cmd_link_status_query(adapter, &link_up,
3412 &mac_speed, &lnk_speed, vf + 1);
3413 if (!status)
3414 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
3415 else
3416 goto unreg_netdev;
3417 }
Ajit Khapardee6319362011-02-11 13:35:41 +00003418 }
3419
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003420 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Somnath Kotur34b1ef02011-06-01 00:33:22 +00003421 /* By default all priorities are enabled.
3422 * Needed in case of no GRP5 evt support
3423 */
3424 adapter->vlan_prio_bmap = 0xff;
3425
Somnath Koturf203af72010-10-25 23:01:03 +00003426 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003427 return 0;
3428
Ajit Khapardee6319362011-02-11 13:35:41 +00003429unreg_netdev:
3430 unregister_netdev(netdev);
Sathya Perla5fb379e2009-06-18 00:02:59 +00003431unsetup:
3432 be_clear(adapter);
Sathya Perla3abcded2010-10-03 22:12:27 -07003433msix_disable:
3434 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003435stats_clean:
3436 be_stats_cleanup(adapter);
3437ctrl_clean:
3438 be_ctrl_cleanup(adapter);
Ajit Khaparde48f5a192011-04-06 18:08:30 +00003439free_vf_cfg:
3440 kfree(adapter->vf_cfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003441free_netdev:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00003442 be_sriov_disable(adapter);
Sathya Perlafe6d2a32010-11-21 23:25:50 +00003443 free_netdev(netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00003444 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003445rel_reg:
3446 pci_release_regions(pdev);
3447disable_dev:
3448 pci_disable_device(pdev);
3449do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07003450 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003451 return status;
3452}
3453
/* PM suspend: stop the worker, arm WoL if requested, quiesce the
 * interface, tear down queues and interrupts, then power the device
 * down into the requested state.
 */
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	/* Snapshot the current flow-control settings before teardown */
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}
3478
3479static int be_resume(struct pci_dev *pdev)
3480{
3481 int status = 0;
3482 struct be_adapter *adapter = pci_get_drvdata(pdev);
3483 struct net_device *netdev = adapter->netdev;
3484
3485 netif_device_detach(netdev);
3486
3487 status = pci_enable_device(pdev);
3488 if (status)
3489 return status;
3490
3491 pci_set_power_state(pdev, 0);
3492 pci_restore_state(pdev);
3493
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003494 be_msix_enable(adapter);
Sathya Perla2243e2e2009-11-22 22:02:03 +00003495 /* tell fw we're ready to fire cmds */
3496 status = be_cmd_fw_init(adapter);
3497 if (status)
3498 return status;
3499
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00003500 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003501 if (netif_running(netdev)) {
3502 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003503 be_open(netdev);
3504 rtnl_unlock();
3505 }
3506 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00003507
3508 if (adapter->wol)
3509 be_setup_wol(adapter, false);
Ajit Khapardea4ca0552011-02-11 13:38:03 +00003510
3511 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003512 return 0;
3513}
3514
/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	/* Nothing to do if probe never completed */
	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(adapter->netdev);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	/* Reset the function so no DMA continues across reboot/kexec */
	be_cmd_reset_function(adapter);

	pci_disable_device(pdev);
}
3536
/* EEH: a PCI channel error was detected.  Quiesce the device and tell
 * the core whether a slot reset should be attempted or the device
 * abandoned.
 */
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	/* presumably consulted by the cmd layer to avoid touching dead
	 * hardware — confirm against be_cmds.c */
	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}
3563
3564static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3565{
3566 struct be_adapter *adapter = pci_get_drvdata(pdev);
3567 int status;
3568
3569 dev_info(&adapter->pdev->dev, "EEH reset\n");
3570 adapter->eeh_err = false;
3571
3572 status = pci_enable_device(pdev);
3573 if (status)
3574 return PCI_ERS_RESULT_DISCONNECT;
3575
3576 pci_set_master(pdev);
3577 pci_set_power_state(pdev, 0);
3578 pci_restore_state(pdev);
3579
3580 /* Check if card is ok and fw is ready */
3581 status = be_cmd_POST(adapter);
3582 if (status)
3583 return PCI_ERS_RESULT_DISCONNECT;
3584
3585 return PCI_ERS_RESULT_RECOVERED;
3586}
3587
/*
 * EEH resume callback: the device is functional again. Re-initialize
 * the firmware, re-create the rings/filters and bring the interface
 * back up if it was running when the error was detected.
 */
static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev =  adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	/* re-create the resources torn down in be_eeh_err_detected() */
	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	/* no further recovery possible here; leave the device detached */
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}
3617
/* PCI error-recovery (EEH) callbacks for this driver */
static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};
3623
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003624static struct pci_driver be_driver = {
3625 .name = DRV_NAME,
3626 .id_table = be_dev_ids,
3627 .probe = be_probe,
3628 .remove = be_remove,
3629 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00003630 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00003631 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00003632 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003633};
3634
3635static int __init be_init_module(void)
3636{
Joe Perches8e95a202009-12-03 07:58:21 +00003637 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3638 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003639 printk(KERN_WARNING DRV_NAME
3640 " : Module param rx_frag_size must be 2048/4096/8192."
3641 " Using 2048\n");
3642 rx_frag_size = 2048;
3643 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003644
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003645 return pci_register_driver(&be_driver);
3646}
3647module_init(be_init_module);
3648
/* Module exit point: unregister the PCI driver */
static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);