/*
 * Copyright (C) 2005 - 2010 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, uint, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static bool multi_rxq = true;
module_param(multi_rxq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(multi_rxq, "Multi Rx Queue support. Enabled by default");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};

static inline bool be_multi_rxq(struct be_adapter *adapter)
{
	return (adapter->num_rx_qs > 1);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

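/*
 * Each queue is backed by one physically contiguous DMA-coherent buffer
 * of len * entry_size bytes; be_queue_alloc() below zeroes both the
 * bookkeeping struct and the ring itself so head/tail indices and
 * completion valid bits start from a known state.
 */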
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
				     GFP_KERNEL);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

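/*
 * The doorbell helpers below all follow one pattern: pack the ring id and
 * the number of entries produced (or events/completions consumed) into a
 * single 32-bit value and write it to that ring's doorbell offset. The
 * wmb() in the RQ/TXQ variants orders the descriptor writes in memory
 * before the doorbell write that tells the adapter to fetch them.
 */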
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
			DB_EQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
			DB_CQ_RING_ID_EXT_MASK_SHIFT);

	if (adapter->eeh_err)
		return;

	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle,
				adapter->pmac_id, 0);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
				adapter->if_handle, &adapter->pmac_id, 0);
netdev_addr:
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats_cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;
	struct be_rx_obj *rxo;
	int i;

	memset(dev_stats, 0, sizeof(*dev_stats));
	for_all_rx_queues(adapter, rxo, i) {
		dev_stats->rx_packets += rx_stats(rxo)->rx_pkts;
		dev_stats->rx_bytes += rx_stats(rxo)->rx_bytes;
		dev_stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
		/* no space in linux buffers: best possible approximation */
		dev_stats->rx_dropped +=
			erx_stats->rx_drops_no_fragments[rxo->q.id];
	}

	dev_stats->tx_packets = tx_stats(adapter)->be_tx_pkts;
	dev_stats->tx_bytes = tx_stats(adapter)->be_tx_bytes;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f; it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		adapter->link_speed = -1;
		if (link_up) {
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
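/*
 * Heuristic (as coded below): eqd = (rx_frags_per_sec / 110000) << 3,
 * clamped to [min_eqd, max_eqd] and forced to 0 when under 10 so a
 * lightly loaded ring keeps low latency. The constants are tuning values
 * taken from the code itself, not from a documented formula.
 */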
static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_eq_obj *rx_eq = &rxo->rx_eq;
	struct be_rx_stats *stats = &rxo->stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->rx_fps = (stats->rx_frags - stats->prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->prev_rx_frags = stats->rx_frags;
	eqd = stats->rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

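/*
 * be_calc_rate() converts a byte delta over a jiffy interval to Mbits/sec:
 * bytes / (ticks / HZ) is bytes/sec, << 3 makes it bits/sec, and the final
 * divide by 1000000 yields Mbps. For example, 250 MB moved in 2 seconds is
 * 125000000 B/s -> 1000000000 b/s -> 1000 Mbps.
 */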
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* MB/Sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
	struct be_tx_stats *stats = tx_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
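/*
 * The hardware appears to consume TX WRBs in pairs on BE2/BE3: when the
 * total (data + header) is odd, a zero-length dummy WRB pads it to an
 * even count, while Lancer chips skip the padding. This pairing
 * requirement is inferred from the dummy-WRB logic below, not from a
 * datasheet.
 */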
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
		bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (lancer_chip(adapter) || !(cnt & 1)) {
		*dummy = false;
	} else {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	}
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
		struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
	u8 vlan_prio = 0;
	u16 vlan_tag = 0;

	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
		if (lancer_chip(adapter) && adapter->sli_family ==
							LANCER_A0_SLI_FAMILY) {
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
			if (is_tcp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								tcpcs, hdr, 1);
			else if (is_udp_pkt(skb))
				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
								udpcs, hdr, 1);
		}
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		vlan_tag = vlan_tx_tag_get(skb);
		vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		/* If vlan priority provided by OS is NOT in available bmap */
		if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
			vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
					adapter->recommended_prio;
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
		bool unmap_single)
{
	dma_addr_t dma;

	be_dws_le_to_cpu(wrb, sizeof(*wrb));

	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
	if (wrb->frag_len) {
		if (unmap_single)
			dma_unmap_single(dev, dma, wrb->frag_len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
	}
}

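/*
 * make_tx_wrbs() maps the skb head (dma_map_single) and each page frag
 * (dma_map_page), filling one WRB per mapping behind the header WRB
 * reserved at the queue head. On a mapping failure it rewinds txq->head
 * to map_head, unmaps whatever was filled, and returns 0 so the caller
 * drops the skb instead of ringing the doorbell.
 */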
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct device *dev = &adapter->pdev->dev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = dma_map_page(dev, frag->page, frag->page_offset,
				       frag->size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(dev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
					(ETH_HLEN + ETH_FCS_LEN))) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU,
			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_N_VID; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
					NULL, 0, 1, 1);
	}

	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlan_grp = grp;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}

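/*
 * Receive-filter policy, from least to most selective: IFF_PROMISC puts
 * the port in full promiscuous mode; IFF_ALLMULTI, or more groups than
 * BE_MAX_MC, falls back to multicast-promiscuous (NULL list); otherwise
 * the exact multicast list from the netdev is programmed.
 */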
static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
712 if (adapter->promiscuous) {
713 adapter->promiscuous = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000714 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000715 }
716
Sathya Perlae7b909a2009-11-22 22:01:10 +0000717 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000718 if (netdev->flags & IFF_ALLMULTI ||
719 netdev_mc_count(netdev) > BE_MAX_MC) {
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000720 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
Sathya Perlae7b909a2009-11-22 22:01:10 +0000721 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000722 goto done;
723 }
724
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000725 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800726 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000727done:
728 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700729}
730
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000731static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
732{
733 struct be_adapter *adapter = netdev_priv(netdev);
734 int status;
735
736 if (!adapter->sriov_enabled)
737 return -EPERM;
738
739 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
740 return -EINVAL;
741
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000742 if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
743 status = be_cmd_pmac_del(adapter,
744 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000745 adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000746
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000747 status = be_cmd_pmac_add(adapter, mac,
748 adapter->vf_cfg[vf].vf_if_handle,
Ajit Khapardef8617e02011-02-11 13:36:37 +0000749 &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000750
751 if (status)
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000752 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
753 mac, vf);
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000754 else
755 memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
756
Sarveshwar Bandiba343c72010-03-31 02:56:12 +0000757 return status;
758}
759
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000760static int be_get_vf_config(struct net_device *netdev, int vf,
761 struct ifla_vf_info *vi)
762{
763 struct be_adapter *adapter = netdev_priv(netdev);
764
765 if (!adapter->sriov_enabled)
766 return -EPERM;
767
768 if (vf >= num_vfs)
769 return -EINVAL;
770
771 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000772 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000773 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000774 vi->qos = 0;
775 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
776
777 return 0;
778}
779
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000780static int be_set_vf_vlan(struct net_device *netdev,
781 int vf, u16 vlan, u8 qos)
782{
783 struct be_adapter *adapter = netdev_priv(netdev);
784 int status = 0;
785
786 if (!adapter->sriov_enabled)
787 return -EPERM;
788
789 if ((vf >= num_vfs) || (vlan > 4095))
790 return -EINVAL;
791
792 if (vlan) {
793 adapter->vf_cfg[vf].vf_vlan_tag = vlan;
794 adapter->vlans_added++;
795 } else {
796 adapter->vf_cfg[vf].vf_vlan_tag = 0;
797 adapter->vlans_added--;
798 }
799
800 status = be_vid_config(adapter, true, vf);
801
802 if (status)
803 dev_info(&adapter->pdev->dev,
804 "VLAN %d config on VF %d failed\n", vlan, vf);
805 return status;
806}
807
Ajit Khapardee1d18732010-07-23 01:52:13 +0000808static int be_set_vf_tx_rate(struct net_device *netdev,
809 int vf, int rate)
810{
811 struct be_adapter *adapter = netdev_priv(netdev);
812 int status = 0;
813
814 if (!adapter->sriov_enabled)
815 return -EPERM;
816
817 if ((vf >= num_vfs) || (rate < 0))
818 return -EINVAL;
819
820 if (rate > 10000)
821 rate = 10000;
822
823 adapter->vf_cfg[vf].vf_tx_rate = rate;
Ajit Khaparde856c4012011-02-11 13:32:32 +0000824 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
Ajit Khapardee1d18732010-07-23 01:52:13 +0000825
826 if (status)
827 dev_info(&adapter->pdev->dev,
828 "tx rate %d on VF %d failed\n", rate, vf);
829 return status;
830}
831
Sathya Perla3abcded2010-10-03 22:12:27 -0700832static void be_rx_rate_update(struct be_rx_obj *rxo)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700833{
Sathya Perla3abcded2010-10-03 22:12:27 -0700834 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700835 ulong now = jiffies;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700836
Sathya Perla4097f662009-03-24 16:40:13 -0700837 /* Wrapped around */
Sathya Perla3abcded2010-10-03 22:12:27 -0700838 if (time_before(now, stats->rx_jiffies)) {
839 stats->rx_jiffies = now;
Sathya Perla4097f662009-03-24 16:40:13 -0700840 return;
841 }
842
843 /* Update the rate once in two seconds */
Sathya Perla3abcded2010-10-03 22:12:27 -0700844 if ((now - stats->rx_jiffies) < 2 * HZ)
Sathya Perla4097f662009-03-24 16:40:13 -0700845 return;
846
Sathya Perla3abcded2010-10-03 22:12:27 -0700847 stats->rx_rate = be_calc_rate(stats->rx_bytes - stats->rx_bytes_prev,
848 now - stats->rx_jiffies);
849 stats->rx_jiffies = now;
850 stats->rx_bytes_prev = stats->rx_bytes;
Sathya Perla4097f662009-03-24 16:40:13 -0700851}
852
Sathya Perla3abcded2010-10-03 22:12:27 -0700853static void be_rx_stats_update(struct be_rx_obj *rxo,
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000854 u32 pktsize, u16 numfrags, u8 pkt_type)
Sathya Perla4097f662009-03-24 16:40:13 -0700855{
Sathya Perla3abcded2010-10-03 22:12:27 -0700856 struct be_rx_stats *stats = &rxo->stats;
Sathya Perla4097f662009-03-24 16:40:13 -0700857
Sathya Perla3abcded2010-10-03 22:12:27 -0700858 stats->rx_compl++;
859 stats->rx_frags += numfrags;
860 stats->rx_bytes += pktsize;
861 stats->rx_pkts++;
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000862 if (pkt_type == BE_MULTICAST_PACKET)
Sathya Perla3abcded2010-10-03 22:12:27 -0700863 stats->rx_mcast_pkts++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700864}
865
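/*
 * csum_passed() trusts the hardware checksum result only where it is
 * meaningful: the L4 bit counts only for TCP/UDP frames, and ipcksm is
 * ignored for IPv6 because the v6 header carries no checksum of its own.
 */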
static inline bool csum_passed(struct be_eth_rx_compl *rxcp)
{
	u8 l4_cksm, ipv6, ipcksm, tcpf, udpf;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ipv6 = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
	udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);

	/* L4 checksum is not reliable for non TCP/UDP packets.
	 * Also ignore ipcksm for ipv6 pkts */
	return (tcpf || udpf) && l4_cksm && (ipcksm || ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &rxo->q;

	rx_page_info = &rxo->page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user) {
		dma_unmap_page(&adapter->pdev->dev,
			       dma_unmap_addr(rx_page_info, bus),
			       adapter->big_page_size, DMA_FROM_DEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
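/*
 * Layout note: up to BE_HDR_LEN bytes of the first fragment are copied
 * into the skb's linear area so protocol headers are directly readable;
 * the rest of the frame stays in page fragments, and frags that share a
 * physical page are coalesced into a single frag slot.
 */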
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &rxo->q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxo, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(rxo, pktsize, num_rcvd, pkt_type);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, rxo, skb, rxcp, num_rcvd);

	if (likely(adapter->rx_csum && csum_passed(rxcp)))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb_checksum_none_assert(skb);

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
		struct be_rx_obj *rxo,
		struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct be_eq_obj *eq_obj = &rxo->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if ((adapter->pvid == vlanf) && !adapter->vlan_tag[vlanf])
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxo, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		if (!lancer_chip(adapter))
			vid = swab16(vid);

		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(rxo, pkt_size, num_rcvd, pkt_type);
}

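/*
 * Completion-ring convention (see be_rx_compl_get/_reset below): hardware
 * sets a valid bit in each new CQ entry; the driver checks that word
 * first, then issues rmb() so the rest of the entry is not read ahead of
 * the check, and clears the whole word once the entry is consumed.
 */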
static struct be_eth_rx_compl *be_rx_compl_get(struct be_rx_obj *rxo)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&rxo->cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&rxo->cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
	u32 order = get_order(size);

	if (order > 0)
		gfp |= __GFP_COMP;
	return alloc_pages(gfp, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
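/*
 * Page-sharing scheme: one big_page_size page is sliced into rx_frag_size
 * buffers. get_page() takes an extra reference per slice, and only the
 * entry flagged last_page_user triggers the dma_unmap_page() when its
 * slice is finally consumed.
 */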
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
	struct be_adapter *adapter = rxo->adapter;
	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &rxo->q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &rxo->page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size, gfp);
			if (unlikely(!pagep)) {
				rxo->stats.rx_post_fail++;
				break;
			}
			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
						    0, adapter->big_page_size,
						    DMA_FROM_DEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		rxo->rx_post_starved = true;
	}
}

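/* Pop the next valid completion, if any, from the TX completion queue.
 * The rmb() ensures the valid bit is read before the rest of the entry;
 * the valid bit is then cleared so the slot reads as unused after the
 * ring wraps.
 */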
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

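/* Reclaim one sent skb: walk the TX ring from tail up to last_index,
 * unmapping each fragment's DMA buffer, then free the skb and return the
 * consumed wrbs (including the header wrb) to the queue.
 */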
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		unmap_tx_frag(&adapter->pdev->dev, wrb,
			      (unmap_skb_hdr && skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}

static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

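/* Drain all pending entries from the event queue, then re-arm it and,
 * if anything was reaped, kick NAPI to do the real completion work.
 */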
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &rxo->q;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
		be_rx_compl_discard(adapter, rxo, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, false, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, rxo, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
						wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		index_adv(&end_idx,
			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
			txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;

	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;
	struct be_rx_obj *rxo;
	int i;

	for_all_rx_queues(adapter, rxo, i) {
		q = &rxo->q;
		if (q->created) {
			be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
			/* After the rxq is invalidated, wait for a grace time
			 * of 1ms for all dma to end and the flush compl to
			 * arrive
			 */
			mdelay(1);
			be_rx_q_clean(adapter, rxo);
		}
		be_queue_free(adapter, q);

		q = &rxo->cq;
		if (q->created)
			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
		be_queue_free(adapter, q);

		/* Clear any residual events */
		q = &rxo->rx_eq.q;
		if (q->created) {
			be_eq_clean(adapter, &rxo->rx_eq);
			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
		}
		be_queue_free(adapter, q);
	}
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	struct be_rx_obj *rxo;
	int rc, i;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	for_all_rx_queues(adapter, rxo, i) {
		rxo->adapter = adapter;
		rxo->rx_eq.max_eqd = BE_MAX_EQD;
		rxo->rx_eq.enable_aic = true;

		/* EQ */
		eq = &rxo->rx_eq.q;
		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
					sizeof(struct be_eq_entry));
		if (rc)
			goto err;

		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
		if (rc)
			goto err;

		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;

		/* CQ */
		cq = &rxo->cq;
		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
				sizeof(struct be_eth_rx_compl));
		if (rc)
			goto err;

		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
		if (rc)
			goto err;
		/* Rx Q */
		q = &rxo->q;
		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
				sizeof(struct be_eth_rx_d));
		if (rc)
			goto err;

		rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
			BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle,
			(i > 0) ? 1 : 0 /* rss enable */, &rxo->rss_id);
		if (rc)
			goto err;
	}

	if (be_multi_rxq(adapter)) {
		u8 rsstable[MAX_RSS_QS];

		for_all_rss_queues(adapter, rxo, i)
			rsstable[i] = rxo->rss_id;

		rc = be_cmd_rss_config(adapter, rsstable,
			adapter->num_rx_qs - 1);
		if (rc)
			goto err;
	}

	return 0;
err:
	be_rx_queues_destroy(adapter);
	return -1;
}

static bool event_peek(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return false;
	else
		return true;
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	struct be_rx_obj *rxo;
	int isr, i, tx = 0, rx = 0;

	if (lancer_chip(adapter)) {
		if (event_peek(&adapter->tx_eq))
			tx = event_handle(adapter, &adapter->tx_eq);
		for_all_rx_queues(adapter, rxo, i) {
			if (event_peek(&rxo->rx_eq))
				rx |= event_handle(adapter, &rxo->rx_eq);
		}

		if (!(tx || rx))
			return IRQ_NONE;

	} else {
		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
		if (!isr)
			return IRQ_NONE;

		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
			event_handle(adapter, &adapter->tx_eq);

		for_all_rx_queues(adapter, rxo, i) {
			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
				event_handle(adapter, &rxo->rx_eq);
		}
	}

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_rx_obj *rxo = dev;
	struct be_adapter *adapter = rxo->adapter;

	event_handle(adapter, &rxo->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_rx_obj *rxo,
			struct be_eth_rx_compl *rxcp, u8 err)
{
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		rxo->stats.rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

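/* NAPI poll handler for an RX queue: consume up to @budget completions,
 * refill the RX ring once it falls below the watermark, and re-arm the
 * completion queue only when all outstanding work has been consumed.
 */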
static int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
	struct be_adapter *adapter = rxo->adapter;
	struct be_queue_info *rx_cq = &rxo->cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;
	u16 num_rcvd;
	u8 err;

	rxo->stats.rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(rxo);
		if (!rxcp)
			break;

		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
				rxcp);
		/* Ignore flush completions */
		if (num_rcvd) {
			if (do_gro(rxo, rxcp, err))
				be_rx_compl_process_gro(adapter, rxo, rxcp);
			else
				be_rx_compl_process(adapter, rxo, rxcp);
		}

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(rxo, GFP_ATOMIC);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		tx_stats(adapter)->be_tx_events++;
		tx_stats(adapter)->be_tx_compl += tx_compl;
	}

	return 1;
}

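/* Decode any unrecoverable error (UE) state from PCI config space: bits
 * that are set and not masked are reported using the
 * ue_status_low_desc/ue_status_hi_desc tables.
 */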
void be_detect_dump_ue(struct be_adapter *adapter)
{
	u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
	u32 i;

	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW, &ue_status_lo);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HIGH, &ue_status_hi);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
	pci_read_config_dword(adapter->pdev,
				PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);

	ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
	ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));

	if (ue_status_lo || ue_status_hi) {
		adapter->ue_detected = true;
		adapter->eeh_err = true;
		dev_err(&adapter->pdev->dev, "UE Detected!!\n");
	}

	if (ue_status_lo) {
		for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
			if (ue_status_lo & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_low_desc[i]);
		}
	}
	if (ue_status_hi) {
		for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
			if (ue_status_hi & 1)
				dev_err(&adapter->pdev->dev,
				"UE: %s bit set\n", ue_status_hi_desc[i]);
		}
	}
}

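/* 1-second housekeeping timer: reaps MCC completions while interrupts
 * are off, refreshes stats and rate/EQD estimates, replenishes starved
 * RX rings, and checks for unrecoverable errors.
 */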
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);
	struct be_rx_obj *rxo;
	int i;

	/* when interrupts are not yet enabled, just reap any pending
	 * mcc completions */
	if (!netif_running(adapter->netdev)) {
		int mcc_compl, status = 0;

		mcc_compl = be_process_mcc(adapter, &status);

		if (mcc_compl) {
			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
		}

		if (!adapter->ue_detected && !lancer_chip(adapter))
			be_detect_dump_ue(adapter);

		goto reschedule;
	}

	if (!adapter->stats_cmd_sent)
		be_cmd_get_stats(adapter, &adapter->stats_cmd);

	be_tx_rate_update(adapter);

	for_all_rx_queues(adapter, rxo, i) {
		be_rx_rate_update(rxo);
		be_rx_eqd_update(adapter, rxo);

		if (rxo->rx_post_starved) {
			rxo->rx_post_starved = false;
			be_post_rx_frags(rxo, GFP_KERNEL);
		}
	}
	if (!adapter->ue_detected && !lancer_chip(adapter))
		be_detect_dump_ue(adapter);

reschedule:
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

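/* Pick the RX queue count. Multiple queues require RSS capability and no
 * SR-IOV; the 0x400 function-mode check presumably flags a mode (e.g.
 * multi-channel) in which RSS must not be used -- the magic constant is
 * kept as-is from the source.
 */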
static int be_num_rxqs_get(struct be_adapter *adapter)
{
	if (multi_rxq && (adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
		!adapter->sriov_enabled && !(adapter->function_mode & 0x400)) {
		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
	} else {
		dev_warn(&adapter->pdev->dev,
			"No support for multiple RX queues\n");
		return 1;
	}
}

static void be_msix_enable(struct be_adapter *adapter)
{
#define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
	int i, status;

	adapter->num_rx_qs = be_num_rxqs_get(adapter);

	for (i = 0; i < (adapter->num_rx_qs + 1); i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
			adapter->num_rx_qs + 1);
	if (status == 0) {
		goto done;
	} else if (status >= BE_MIN_MSIX_VECTORS) {
		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
				status) == 0) {
			adapter->num_rx_qs = status - 1;
			dev_warn(&adapter->pdev->dev,
				"Could alloc only %d MSIx vectors. "
				"Using %d RX Qs\n", status, adapter->num_rx_qs);
			goto done;
		}
	}
	return;
done:
	adapter->msix_enabled = true;
}

static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}

static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (adapter->sriov_enabled) {
		pci_disable_sriov(adapter->pdev);
		adapter->sriov_enabled = false;
	}
#endif
}

static inline int be_msix_vec_get(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc, void *context)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj);
	return request_irq(vec, handler, 0, eq_obj->desc, context);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
			void *context)
{
	int vec = be_msix_vec_get(adapter, eq_obj);

	free_irq(vec, context);
}

static int be_msix_register(struct be_adapter *adapter)
{
	struct be_rx_obj *rxo;
	int status, i;
	char qname[10];

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
				adapter);
	if (status)
		goto err;

	for_all_rx_queues(adapter, rxo, i) {
		sprintf(qname, "rxq%d", i);
		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
				qname, rxo);
		if (status)
			goto err_msix;
	}

	return 0;

err_msix:
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

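/* Register interrupts: prefer the MSI-X vectors set up earlier and fall
 * back to a shared INTx line on the PF only (VFs are MSI-X only).
 */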
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct be_rx_obj *rxo;
	int i;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq, adapter);

	for_all_rx_queues(adapter, rxo, i)
		be_free_irq(adapter, &rxo->rx_eq, rxo);

done:
	adapter->isr_registered = false;
}

static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec, i;

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	if (!lancer_chip(adapter))
		be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq);
		synchronize_irq(vec);

		for_all_rx_queues(adapter, rxo, i) {
			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
			synchronize_irq(vec);
		}
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	for_all_rx_queues(adapter, rxo, i)
		napi_disable(&rxo->rx_eq.napi);

	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

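/* Bring-up order matters here: post RX buffers and enable NAPI before
 * interrupts are registered/enabled, then arm the EQs/CQs (created in
 * the unarmed state), and only then start async MCC processing and the
 * link/flow-control configuration commands.
 */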
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	struct be_rx_obj *rxo;
	bool link_up;
	int status, i;
	u8 mac_speed;
	u16 link_speed;

	for_all_rx_queues(adapter, rxo, i) {
		be_post_rx_frags(rxo, GFP_KERNEL);
		napi_enable(&rxo->rx_eq.napi);
	}
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	if (!lancer_chip(adapter))
		be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	for_all_rx_queues(adapter, rxo, i) {
		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
		be_cq_notify(adapter, rxo->cq.id, true, 0);
	}
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}

static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
				    GFP_KERNEL);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
					  cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
	return status;
}

/*
 * Generate a seed MAC address from the PF MAC address using jhash.
 * MAC addresses for VFs are assigned incrementally starting from the seed.
 * These addresses are programmed in the ASIC by the PF and the VF driver
 * queries for the MAC address during its probe.
 */
static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
{
	u32 vf = 0;
	int status = 0;
	u8 mac[ETH_ALEN];

	be_vf_eth_addr_generate(adapter, mac);

	for (vf = 0; vf < num_vfs; vf++) {
		status = be_cmd_pmac_add(adapter, mac,
					adapter->vf_cfg[vf].vf_if_handle,
					&adapter->vf_cfg[vf].vf_pmac_id,
					vf + 1);
		if (status)
			dev_err(&adapter->pdev->dev,
				"Mac address add failed for VF %d\n", vf);
		else
			memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

		mac[5] += 1;
	}
	return status;
}

static inline void be_vf_eth_addr_rem(struct be_adapter *adapter)
{
	u32 vf;

	for (vf = 0; vf < num_vfs; vf++) {
		if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
			be_cmd_pmac_del(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
	}
}

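/* Create the interface(s) and all queues. The PF additionally creates
 * one interface per VF when SR-IOV is enabled; a VF instead queries the
 * MAC address that the PF provisioned for its interface.
 */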
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags, vf = 0;
	int status;
	u8 mac[ETH_ALEN];

	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;

	if (be_physfn(adapter)) {
		cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
				BE_IF_FLAGS_PROMISCUOUS |
				BE_IF_FLAGS_PASS_L3L4_ERRORS;
		en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;

		if (be_multi_rxq(adapter)) {
			cap_flags |= BE_IF_FLAGS_RSS;
			en_flags |= BE_IF_FLAGS_RSS;
		}
	}

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false /* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id, 0);
	if (status != 0)
		goto do_none;

	if (be_physfn(adapter)) {
		if (adapter->sriov_enabled) {
			while (vf < num_vfs) {
				cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED |
							BE_IF_FLAGS_BROADCAST;
				status = be_cmd_if_create(adapter, cap_flags,
					en_flags, mac, true,
					&adapter->vf_cfg[vf].vf_if_handle,
					NULL, vf + 1);
				if (status) {
					dev_err(&adapter->pdev->dev,
					"Interface Create failed for VF %d\n",
					vf);
					goto if_destroy;
				}
				adapter->vf_cfg[vf].vf_pmac_id =
						BE_INVALID_PMAC_ID;
				vf++;
			}
		}
	} else {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
		if (!status) {
			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
		}
	}

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	adapter->link_speed = -1;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);
	be_cmd_if_destroy(adapter, adapter->if_handle, 0);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	int vf;

	if (be_physfn(adapter) && adapter->sriov_enabled)
		be_vf_eth_addr_rem(adapter);

	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	if (be_physfn(adapter) && adapter->sriov_enabled)
		for (vf = 0; vf < num_vfs; vf++)
			if (adapter->vf_cfg[vf].vf_if_handle)
				be_cmd_if_destroy(adapter,
					adapter->vf_cfg[vf].vf_if_handle,
					vf + 1);

	be_cmd_if_destroy(adapter, adapter->if_handle, 0);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

#define FW_FILE_HDR_SIGN	"ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}

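/* Write each firmware component to flash in 32KB chunks: intermediate
 * chunks are staged with FLASHROM_OPER_SAVE and the final chunk uses
 * FLASHROM_OPER_FLASH to commit the image. RedBoot is (re)flashed only
 * when its on-flash CRC differs from the new image.
 */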
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)
{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	const struct flash_comp *pflashcomp;
	int num_comp;

	static const struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	static const struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = ARRAY_SIZE(gen3_flash_types);
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = ARRAY_SIZE(gen2_flash_types);
	}
	for (i = 0; i < num_comp; i++) {
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
			(num_of_images * sizeof(struct image_hdr)))))
			continue;
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}

static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
	if (fhdr == NULL)
		return 0;
	if (fhdr->build[0] == '3')
		return BE_GEN3;
	else if (fhdr->build[0] == '2')
		return BE_GEN2;
	else
		return 0;
}

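/* Firmware flashing entry point (reached via ethtool flashing, e.g.
 * "ethtool -f <iface> <file>.ufi" -- the invocation is noted here as an
 * assumption; the ethtool glue lives outside this file). The UFI
 * generation in the file header must match the adapter generation
 * (Gen2 vs Gen3) before anything is written.
 */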
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr_g2 *fhdr;
	struct flash_file_hdr_g3 *fhdr3;
	struct image_hdr *img_hdr_ptr = NULL;
	struct be_dma_mem flash_cmd;
	int status, i = 0, num_imgs = 0;
	const u8 *p;

	if (!netif_running(adapter->netdev)) {
		dev_err(&adapter->pdev->dev,
			"Firmware load not allowed (interface is down)\n");
		return -EPERM;
	}

	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr_g2 *) p;
	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
					  &flash_cmd.dma, GFP_KERNEL);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	if ((adapter->generation == BE_GEN3) &&
			(get_ufigen_type(fhdr) == BE_GEN3)) {
		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
		num_imgs = le32_to_cpu(fhdr3->num_imgs);
		for (i = 0; i < num_imgs; i++) {
			img_hdr_ptr = (struct image_hdr *) (fw->data +
					(sizeof(struct flash_file_hdr_g3) +
					 i * sizeof(struct image_hdr)));
			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
				status = be_flash_data(adapter, fw, &flash_cmd,
							num_imgs);
		}
	} else if ((adapter->generation == BE_GEN2) &&
			(get_ufigen_type(fhdr) == BE_GEN2)) {
		status = be_flash_data(adapter, fw, &flash_cmd, 0);
	} else {
		dev_err(&adapter->pdev->dev,
			"UFI and Interface are not compatible for flashing\n");
		status = -1;
	}

	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
			  flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

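/* netdev callbacks, including the SR-IOV VF configuration hooks */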
static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
	.ndo_set_vf_mac		= be_set_vf_mac,
	.ndo_set_vf_vlan	= be_set_vf_vlan,
	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
	.ndo_get_vf_config	= be_get_vf_config
};

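/* Set up netdev feature flags, default flow-control settings and the
 * NAPI contexts: one per Rx queue, plus one shared by Tx and MCC events.
 */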
static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_rx_obj *rxo;
	int i;

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_GRO | NETIF_F_TSO6;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lancer_chip(adapter))
		netdev->vlan_features |= NETIF_F_TSO6;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	for_all_rx_queues(adapter, rxo, i)
		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
			BE_NAPI_WEIGHT);

	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg && be_physfn(adapter))
		iounmap(adapter->pcicfg);
}

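/* Map the BARs this function needs: Lancer exposes only a doorbell BAR;
 * on BE2/BE3 the PF also maps the CSR and PCI-config BARs, while a VF
 * reaches its pcicfg space at a fixed offset within the doorbell BAR.
 */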
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;
	int pcicfg_reg, db_reg;

	if (lancer_chip(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
			pci_resource_len(adapter->pdev, 0));
		if (addr == NULL)
			return -ENOMEM;
		adapter->db = addr;
		return 0;
	}

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
				pci_resource_len(adapter->pdev, 2));
		if (addr == NULL)
			return -ENOMEM;
		adapter->csr = addr;
	}

	if (adapter->generation == BE_GEN2) {
		pcicfg_reg = 1;
		db_reg = 4;
	} else {
		pcicfg_reg = 0;
		if (be_physfn(adapter))
			db_reg = 4;
		else
			db_reg = 0;
	}
	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
				pci_resource_len(adapter->pdev, db_reg));
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	if (be_physfn(adapter)) {
		addr = ioremap_nocache(
				pci_resource_start(adapter->pdev, pcicfg_reg),
				pci_resource_len(adapter->pdev, pcicfg_reg));
		if (addr == NULL)
			goto pci_map_err;
		adapter->pcicfg = addr;
	} else
		adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
				  mem->dma);
}

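/* Map BARs and allocate the DMA memory used for mailbox and multicast
 * commands. The mailbox must be 16-byte aligned, so a padded buffer is
 * allocated and an aligned view of it is kept in adapter->mbox_mem.
 */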
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
						mbox_mem_alloc->size,
						&mbox_mem_alloc->dma,
						GFP_KERNEL);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = dma_alloc_coherent(&adapter->pdev->dev,
					    mc_cmd_mem->size, &mc_cmd_mem->dma,
					    GFP_KERNEL);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	mutex_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	init_completion(&adapter->flash_compl);
	pci_save_state(adapter->pdev);
	return 0;

free_mbox:
	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
			  mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

2774static void be_stats_cleanup(struct be_adapter *adapter)
2775{
Sathya Perla3abcded2010-10-03 22:12:27 -07002776 struct be_dma_mem *cmd = &adapter->stats_cmd;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777
2778 if (cmd->va)
Ivan Vecera2b7bceb2011-02-02 08:05:12 +00002779 dma_free_coherent(&adapter->pdev->dev, cmd->size,
2780 cmd->va, cmd->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002781}
2782
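/* Allocate the DMA buffer that is reused for GET_STATS firmware cmds */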
static int be_stats_init(struct be_adapter *adapter)
{
	struct be_dma_mem *cmd = &adapter->stats_cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
				     GFP_KERNEL);
	if (cmd->va == NULL)
		return -1;
	memset(cmd->va, 0, cmd->size);
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	cancel_delayed_work_sync(&adapter->work);

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_sriov_disable(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

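/* Query firmware version, function mode/capabilities and, on the PF,
 * the permanent MAC address; then size the VLAN table accordingly.
 */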
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
			&adapter->function_mode, &adapter->function_caps);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);

	if (be_physfn(adapter)) {
		status = be_cmd_mac_addr_query(adapter, mac,
			MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);

		if (status)
			return status;

		if (!is_valid_ether_addr(mac))
			return -EADDRNOTAVAIL;

		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
	}

	/* Bit 0x400 in function_mode appears to flag multi-channel (FLEX10)
	 * operation, where the VLAN table is shared across functions */
	if (adapter->function_mode & 0x400)
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
	else
		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

	status = be_cmd_get_cntl_attributes(adapter);
	if (status)
		return status;

	return 0;
}

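/* Derive the adapter generation from the PCI device id. For OC_DEVICE_ID3
 * (Lancer) the SLI interface register is validated and the SLI family
 * recorded; VFs are not supported on that device.
 */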
static int be_dev_family_check(struct be_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 sli_intf = 0, if_type;

	switch (pdev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
		adapter->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID2:
		adapter->generation = BE_GEN3;
		break;
	case OC_DEVICE_ID3:
		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
			  SLI_INTF_IF_TYPE_SHIFT;

		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
		    if_type != 0x02) {
			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
			return -EINVAL;
		}
		if (num_vfs > 0) {
			dev_err(&pdev->dev, "VFs not supported\n");
			return -EINVAL;
		}
		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
					SLI_INTF_FAMILY_SHIFT);
		adapter->generation = BE_GEN3;
		break;
	default:
		adapter->generation = 0;
	}
	return 0;
}

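/* Bring up a newly discovered function: enable the PCI device, set the
 * DMA mask, init control structures, sync with firmware (POST on the PF,
 * then fw_init and a function reset), and finally create the queues and
 * register the netdev. Error paths unwind in strict reverse order.
 */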
static int __devinit be_probe(struct pci_dev *pdev,
			const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);

	status = be_dev_family_check(adapter);
	if (status)
		goto free_netdev;

	adapter->netdev = netdev;
	SET_NETDEV_DEV(netdev, &pdev->dev);

	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	be_sriov_enable(adapter);

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	if (be_physfn(adapter)) {
		status = be_cmd_POST(adapter);
		if (status)
			goto ctrl_clean;
	}

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	be_msix_enable(adapter);

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto msix_disable;

	be_netdev_init(netdev);
	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;
	netif_carrier_off(netdev);

	if (be_physfn(adapter) && adapter->sriov_enabled) {
		status = be_vf_eth_addr_config(adapter);
		if (status)
			goto unreg_netdev;
	}

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;

unreg_netdev:
	unregister_netdev(netdev);
unsetup:
	be_clear(adapter);
msix_disable:
	be_msix_disable(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_sriov_disable(adapter);
	free_netdev(netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

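/* PM suspend: stop the worker, optionally arm wake-on-LAN, close the
 * interface and release queues/MSI-X before powering the device down.
 */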
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	cancel_delayed_work_sync(&adapter->work);
	if (adapter->wol)
		be_setup_wol(adapter, true);

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	be_msix_disable(adapter);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

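/* PM resume: re-enable the device, re-init the firmware command path and
 * rebuild the queues before reopening the interface.
 */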
static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	be_msix_enable(adapter);
	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);

	if (adapter->wol)
		be_setup_wol(adapter, false);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
	return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	if (netif_running(netdev))
		cancel_delayed_work_sync(&adapter->work);

	netif_device_detach(netdev);

	be_cmd_reset_function(adapter);

	if (adapter->wol)
		be_setup_wol(adapter, true);

	pci_disable_device(pdev);
}

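/* PCI EEH (error recovery) callbacks: detach and tear down on error,
 * re-POST the card on slot reset, and rebuild driver state on resume.
 */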
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
				pci_channel_state_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_err(&adapter->pdev->dev, "EEH error detected\n");

	adapter->eeh_err = true;

	netif_device_detach(netdev);

	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_clear(adapter);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_disable_device(pdev);

	return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	int status;

	dev_info(&adapter->pdev->dev, "EEH reset\n");
	adapter->eeh_err = false;

	status = pci_enable_device(pdev);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	pci_set_master(pdev);
	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* Check if card is ok and fw is ready */
	status = be_cmd_POST(adapter);
	if (status)
		return PCI_ERS_RESULT_DISCONNECT;

	return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	dev_info(&adapter->pdev->dev, "EEH resume\n");

	pci_save_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto err;

	status = be_setup(adapter);
	if (status)
		goto err;

	if (netif_running(netdev)) {
		status = be_open(netdev);
		if (status)
			goto err;
	}
	netif_device_attach(netdev);
	return;
err:
	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
	.error_detected = be_eeh_err_detected,
	.slot_reset = be_eeh_reset,
	.resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume,
	.shutdown = be_shutdown,
	.err_handler = &be_eeh_handlers
};

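/* Validate module parameters before registering the PCI driver:
 * rx_frag_size must be 2048, 4096 or 8192, and num_vfs is capped at 32.
 * Example (module name per DRV_NAME, typically "be2net"):
 *	modprobe be2net rx_frag_size=4096 num_vfs=4
 */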
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
	    rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	if (num_vfs > 32) {
		printk(KERN_WARNING DRV_NAME
			" : Module param num_vfs must not be greater than 32."
			" Using 32\n");
		num_vfs = 32;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);