blob: a5a24e6c773e56884a80173707b9016ed3292a46 [file] [log] [blame]
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001/*
Ajit Khaparde294aedc2010-02-19 13:54:58 +00002 * Copyright (C) 2005 - 2010 ServerEngines
Sathya Perla6b7c5b92009-03-11 23:32:03 -07003 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Contact Information:
11 * linux-drivers@serverengines.com
12 *
13 * ServerEngines
14 * 209 N. Fair Oaks Ave
15 * Sunnyvale, CA 94085
16 */
17
18#include "be.h"
Sathya Perla8788fdc2009-07-27 22:52:03 +000019#include "be_cmds.h"
Stephen Hemminger65f71b82009-03-27 00:25:24 -070020#include <asm/div64.h>
Sathya Perla6b7c5b92009-03-11 23:32:03 -070021
22MODULE_VERSION(DRV_VER);
23MODULE_DEVICE_TABLE(pci, be_dev_ids);
24MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
25MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL");
27
28static unsigned int rx_frag_size = 2048;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000029static unsigned int num_vfs;
Sathya Perla6b7c5b92009-03-11 23:32:03 -070030module_param(rx_frag_size, uint, S_IRUGO);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000031module_param(num_vfs, uint, S_IRUGO);
Sathya Perla6b7c5b92009-03-11 23:32:03 -070032MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
Sarveshwar Bandiba343c72010-03-31 02:56:12 +000033MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
Sathya Perla6b7c5b92009-03-11 23:32:03 -070034
/* PCI device IDs this driver binds to.  BE_DEVICE_ID* / OC_DEVICE_ID*
 * are defined in be.h; the OC_* entries are presumably the OneConnect
 * variants — confirm against the header. */
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
/* Human-readable names for bits 0..31 of the Unrecoverable Error (UE)
 * status-low register; array index == bit position.
 * NOTE(review): several entries carry trailing spaces — presumably for
 * log alignment; preserved byte-for-byte. */
static char *ue_status_low_desc[] = {
	"CEV",
	"CTX",
	"DBUF",
	"ERX",
	"Host",
	"MPU",
	"NDMA",
	"PTC ",
	"RDMA ",
	"RXF ",
	"RXIPS ",
	"RXULP0 ",
	"RXULP1 ",
	"RXULP2 ",
	"TIM ",
	"TPOST ",
	"TPRE ",
	"TXIPS ",
	"TXULP0 ",
	"TXULP1 ",
	"UC ",
	"WDMA ",
	"TXULP2 ",
	"HOST1 ",
	"P0_OB_LINK ",
	"P1_OB_LINK ",
	"HOST_GPIO ",
	"MBOX ",
	"AXGMAC0",
	"AXGMAC1",
	"JTAG",
	"MPU_INTPEND"
};
/* UE Status High CSR */
/* Human-readable names for bits 0..31 of the UE status-high register;
 * array index == bit position.
 * Bug fix: the comma after "NETC" was missing, so the compiler silently
 * concatenated it with the next literal into a single "NETCUnknown"
 * entry, leaving the array 31 elements long and shifting every later
 * bit description by one. */
static char *ue_status_hi_desc[] = {
	"LPCMEMHOST",
	"MGMT_MAC",
	"PCS0ONLINE",
	"MPU_IRAM",
	"PCS1ONLINE",
	"PCTL0",
	"PCTL1",
	"PMEM",
	"RR",
	"TXPB",
	"RXPP",
	"XAUI",
	"TXP",
	"ARM",
	"IPC",
	"HOST2",
	"HOST3",
	"HOST4",
	"HOST5",
	"HOST6",
	"HOST7",
	"HOST8",
	"HOST9",
	"NETC",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown",
	"Unknown"
};
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700113
114static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
115{
116 struct be_dma_mem *mem = &q->dma_mem;
117 if (mem->va)
118 pci_free_consistent(adapter->pdev, mem->size,
119 mem->va, mem->dma);
120}
121
122static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
123 u16 len, u16 entry_size)
124{
125 struct be_dma_mem *mem = &q->dma_mem;
126
127 memset(q, 0, sizeof(*q));
128 q->len = len;
129 q->entry_size = entry_size;
130 mem->size = len * entry_size;
131 mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
132 if (!mem->va)
133 return -1;
134 memset(mem->va, 0, mem->size);
135 return 0;
136}
137
/* Enable or disable host interrupt delivery via the HOSTINTR bit in the
 * membar interrupt-control register.  Reads the current state and writes
 * back only when the requested state differs, avoiding a redundant
 * config-space write.
 * NOTE(review): the eeh_err bail-out happens *after* the ioread32() —
 * if EEH has already isolated the device that read may fault; confirm
 * whether the check should precede the read. */
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (adapter->eeh_err)
		return;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;		/* already in the requested state */

	iowrite32(reg, addr);
}
156
Sathya Perla8788fdc2009-07-27 22:52:03 +0000157static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700158{
159 u32 val = 0;
160 val |= qid & DB_RQ_RING_ID_MASK;
161 val |= posted << DB_RQ_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000162
163 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000164 iowrite32(val, adapter->db + DB_RQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700165}
166
Sathya Perla8788fdc2009-07-27 22:52:03 +0000167static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700168{
169 u32 val = 0;
170 val |= qid & DB_TXULP_RING_ID_MASK;
171 val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
Sathya Perlaf3eb62d2010-06-29 00:11:17 +0000172
173 wmb();
Sathya Perla8788fdc2009-07-27 22:52:03 +0000174 iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700175}
176
Sathya Perla8788fdc2009-07-27 22:52:03 +0000177static void be_eq_notify(struct be_adapter *adapter, u16 qid,
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700178 bool arm, bool clear_int, u16 num_popped)
179{
180 u32 val = 0;
181 val |= qid & DB_EQ_RING_ID_MASK;
Sathya Perlacf588472010-02-14 21:22:01 +0000182
183 if (adapter->eeh_err)
184 return;
185
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700186 if (arm)
187 val |= 1 << DB_EQ_REARM_SHIFT;
188 if (clear_int)
189 val |= 1 << DB_EQ_CLR_SHIFT;
190 val |= 1 << DB_EQ_EVNT_SHIFT;
191 val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000192 iowrite32(val, adapter->db + DB_EQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700193}
194
Sathya Perla8788fdc2009-07-27 22:52:03 +0000195void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700196{
197 u32 val = 0;
198 val |= qid & DB_CQ_RING_ID_MASK;
Sathya Perlacf588472010-02-14 21:22:01 +0000199
200 if (adapter->eeh_err)
201 return;
202
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700203 if (arm)
204 val |= 1 << DB_CQ_REARM_SHIFT;
205 val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000206 iowrite32(val, adapter->db + DB_CQ_OFFSET);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700207}
208
/* .ndo_set_mac_address handler.  Replaces the interface MAC by deleting
 * the currently programmed pmac and adding the new one via FW commands,
 * then mirrors the address into netdev->dev_addr on success.
 * Returns 0 or a FW-command error status.
 * NOTE(review): if pmac_del succeeds but pmac_add then fails, the port
 * is left with no unicast MAC programmed — confirm this is acceptable. */
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	/* MAC addr configuration will be done in hardware for VFs
	 * by their corresponding PFs. Just copy to netdev addr here
	 */
	if (!be_physfn(adapter))
		goto netdev_addr;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
netdev_addr:
	/* status is 0 on the VF path, so VFs always take this copy */
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}
236
/* Refresh netdev->stats from the driver's software counters and the
 * port/rxf/erx hardware statistics blocks fetched by FW command.
 * Called with fresh stats in adapter->stats.cmd.va. */
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
		&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	/* packet/byte totals come from the driver's own counters */
	dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
	dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
	dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
	dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
	dev_stats->multicast = drvr_stats(adapter)->be_rx_mcast_pkt;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
		port_stats->rx_input_fifo_overflow +
		rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->collisions = 0;

	/* detailed tx_errors: none of these are reported by this HW */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}
307
Sathya Perla8788fdc2009-07-27 22:52:03 +0000308void be_link_status_update(struct be_adapter *adapter, bool link_up)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700309{
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700310 struct net_device *netdev = adapter->netdev;
311
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700312 /* If link came up or went down */
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000313 if (adapter->link_up != link_up) {
Ajit Khaparde0dffc832009-11-29 17:57:46 +0000314 adapter->link_speed = -1;
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000315 if (link_up) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700316 netif_start_queue(netdev);
317 netif_carrier_on(netdev);
318 printk(KERN_INFO "%s: Link up\n", netdev->name);
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000319 } else {
320 netif_stop_queue(netdev);
321 netif_carrier_off(netdev);
322 printk(KERN_INFO "%s: Link down\n", netdev->name);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700323 }
Sathya Perlaa8f447b2009-06-18 00:10:27 +0000324 adapter->link_up = link_up;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700325 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700326}
327
/* Update the EQ delay on BE based on the RX frags consumed / sec
 * (adaptive interrupt coalescing).  Recomputes at most once a second
 * and only issues the FW command when the delay actually changes. */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	/* map frags/sec to a delay value; the /110000 and <<3 scaling are
	 * presumably empirically tuned — confirm against HW docs */
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;	/* low rate: disable delay entirely */
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}
367
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700368static struct net_device_stats *be_get_stats(struct net_device *dev)
369{
Ajit Khaparde78122a52009-10-07 03:11:20 -0700370 return &dev->stats;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700371}
372
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700373static u32 be_calc_rate(u64 bytes, unsigned long ticks)
374{
375 u64 rate = bytes;
376
377 do_div(rate, ticks / HZ);
378 rate <<= 3; /* bytes/sec -> bits/sec */
379 do_div(rate, 1000000ul); /* MB/Sec */
380
381 return rate;
382}
383
Sathya Perla4097f662009-03-24 16:40:13 -0700384static void be_tx_rate_update(struct be_adapter *adapter)
385{
386 struct be_drvr_stats *stats = drvr_stats(adapter);
387 ulong now = jiffies;
388
389 /* Wrapped around? */
390 if (time_before(now, stats->be_tx_jiffies)) {
391 stats->be_tx_jiffies = now;
392 return;
393 }
394
395 /* Update tx rate once in two seconds */
396 if ((now - stats->be_tx_jiffies) > 2 * HZ) {
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700397 stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
398 - stats->be_tx_bytes_prev,
399 now - stats->be_tx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700400 stats->be_tx_jiffies = now;
401 stats->be_tx_bytes_prev = stats->be_tx_bytes;
402 }
403}
404
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700405static void be_tx_stats_update(struct be_adapter *adapter,
Ajit Khaparde91992e42010-02-19 13:57:12 +0000406 u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700407{
Sathya Perla4097f662009-03-24 16:40:13 -0700408 struct be_drvr_stats *stats = drvr_stats(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700409 stats->be_tx_reqs++;
410 stats->be_tx_wrbs += wrb_cnt;
411 stats->be_tx_bytes += copied;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000412 stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700413 if (stopped)
414 stats->be_tx_stops++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700415}
416
/* Determine number of WRB entries needed to xmit data in an skb:
 * one for the linear head (if non-empty), one per page fragment, one
 * for the header WRB, plus a dummy to keep the total even (the HW
 * presumably requires an even WRB count — confirm).  Sets *dummy when
 * the padding entry is needed. */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	/* 1 if the skb has linear data, else 0 */
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}
435
436static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
437{
438 wrb->frag_pa_hi = upper_32_bits(addr);
439 wrb->frag_pa_lo = addr & 0xFFFFFFFF;
440 wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
441}
442
/* Fill the per-packet header WRB: offload flags (LSO/LSO6, TCP/UDP
 * checksum), optional VLAN tag insertion, and the WRB count and total
 * payload length for this transmit.  All fields are set through the
 * AMAP_SET_BITS bit-field accessors on a zeroed header. */
static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_is_gso(skb)) {
		/* LSO: hand the MSS to the HW segmentation engine */
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
		if (skb_is_gso_v6(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* checksum offload is mutually exclusive with LSO */
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}
474
Sathya Perla7101e112010-03-22 20:41:12 +0000475static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
476 bool unmap_single)
477{
478 dma_addr_t dma;
479
480 be_dws_le_to_cpu(wrb, sizeof(*wrb));
481
482 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
FUJITA Tomonorib681ee72010-04-04 21:40:18 +0000483 if (wrb->frag_len) {
Sathya Perla7101e112010-03-22 20:41:12 +0000484 if (unmap_single)
485 pci_unmap_single(pdev, dma, wrb->frag_len,
486 PCI_DMA_TODEVICE);
487 else
488 pci_unmap_page(pdev, dma, wrb->frag_len,
489 PCI_DMA_TODEVICE);
490 }
491}
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700492
/* DMA-map an skb (linear head + page frags) and post the corresponding
 * WRBs into the TX queue, followed by an optional dummy WRB and the
 * header WRB (reserved first, filled last once the total length is
 * known).  Returns the number of payload bytes mapped, or 0 on a DMA
 * mapping failure — in which case all mappings made so far are undone
 * and the queue head is rolled back.  Does NOT ring the doorbell;
 * the caller (be_xmit) does that. */
static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	dma_addr_t busaddr;
	int i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;
	bool map_single = false;
	u16 map_head;

	/* reserve the header WRB slot up front; filled in at the end */
	hdr = queue_head_node(txq);
	queue_head_inc(txq);
	map_head = txq->head;	/* rollback point for the error path */

	if (skb->len > skb->data_len) {
		int len = skb_headlen(skb);
		busaddr = pci_map_single(pdev, skb->data, len,
				PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		map_single = true;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];
		busaddr = pci_map_page(pdev, frag->page,
				frag->page_offset,
				frag->size, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, busaddr))
			goto dma_err;
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	/* pad to an even WRB count with a zero-length entry */
	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
dma_err:
	/* unwind: unmap every WRB posted so far, starting after the
	 * header slot; only the first mapping was pci_map_single */
	txq->head = map_head;
	while (copied) {
		wrb = queue_head_node(txq);
		unmap_tx_frag(pdev, wrb, map_single);
		map_single = false;
		copied -= wrb->frag_len;
		queue_head_inc(txq);
	}
	return 0;
}
562
/* .ndo_start_xmit handler: map the skb into TX WRBs, record it for
 * completion processing, stop the queue if it is close to full, and
 * ring the TX doorbell.  On mapping failure the skb is dropped (freed)
 * and the queue head restored.  Always returns NETDEV_TX_OK. */
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		atomic_add(wrb_cnt, &txq->used);
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied,
				skb_shinfo(skb)->gso_segs, stopped);
	} else {
		/* DMA mapping failed: roll back and drop the packet */
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
602
603static int be_change_mtu(struct net_device *netdev, int new_mtu)
604{
605 struct be_adapter *adapter = netdev_priv(netdev);
606 if (new_mtu < BE_MIN_MTU ||
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000607 new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
608 (ETH_HLEN + ETH_FCS_LEN))) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700609 dev_info(&adapter->pdev->dev,
610 "MTU must be between %d and %d bytes\n",
Ajit Khaparde34a89b82010-02-09 01:32:43 +0000611 BE_MIN_MTU,
612 (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700613 return -EINVAL;
614 }
615 dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
616 netdev->mtu, new_mtu);
617 netdev->mtu = new_mtu;
618 return 0;
619}
620
/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 * For a VF (vf == true), programs only that VF's single vlan tag on its
 * own if_handle; the PF path then (re)programs the PF interface.
 * NOTE(review): on the VF path, 'status' from the VF command is
 * overwritten by the PF configuration below — confirm intended.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status = 0;
	u32 if_handle;

	if (vf) {
		if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
		vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
		status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
	}

	if (adapter->vlans_added <= adapter->max_vlans) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vtag, ntags, 1, 0);
	} else {
		/* too many vlans: fall back to vlan promiscuous mode */
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
				NULL, 0, 1, 1);
	}

	return status;
}
655
/* .ndo_vlan_rx_register handler.  The EQs are disarmed around the
 * vlan_grp assignment — presumably so in-flight NAPI polling does not
 * observe a half-updated group pointer; confirm the locking model. */
static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}
668
/* .ndo_vlan_rx_add_vid handler: track the vid and reprogram the HW
 * vlan table (PF only; VF vlan filtering is owned by the PF).
 * NOTE(review): vlans_added is incremented even on the VF early-return
 * path, where no vlan is actually programmed — confirm intended. */
static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added++;
	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 1;
	if (adapter->vlans_added <= (adapter->max_vlans + 1))
		be_vid_config(adapter, false, 0);
}
681
/* .ndo_vlan_rx_kill_vid handler: untrack the vid, clear it from the
 * vlan group, and reprogram the HW vlan table (PF only). */
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->vlans_added--;
	vlan_group_set_device(adapter->vlan_grp, vid, NULL);

	if (!be_physfn(adapter))
		return;

	adapter->vlan_tag[vid] = 0;
	/* may drop back out of vlan-promiscuous mode */
	if (adapter->vlans_added <= adapter->max_vlans)
		be_vid_config(adapter, false, 0);
}
696
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700697static void be_set_multicast_list(struct net_device *netdev)
698{
699 struct be_adapter *adapter = netdev_priv(netdev);
700
701 if (netdev->flags & IFF_PROMISC) {
Sathya Perla8788fdc2009-07-27 22:52:03 +0000702 be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
Sathya Perla24307ee2009-06-18 00:09:25 +0000703 adapter->promiscuous = true;
704 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700705 }
Sathya Perla24307ee2009-06-18 00:09:25 +0000706
707 /* BE was previously in promiscous mode; disable it */
708 if (adapter->promiscuous) {
709 adapter->promiscuous = false;
Sathya Perla8788fdc2009-07-27 22:52:03 +0000710 be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
Sathya Perla24307ee2009-06-18 00:09:25 +0000711 }
712
Sathya Perlae7b909a2009-11-22 22:01:10 +0000713 /* Enable multicast promisc if num configured exceeds what we support */
Jiri Pirko4cd24ea2010-02-08 04:30:35 +0000714 if (netdev->flags & IFF_ALLMULTI ||
715 netdev_mc_count(netdev) > BE_MAX_MC) {
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000716 be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
Sathya Perlae7b909a2009-11-22 22:01:10 +0000717 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000718 goto done;
719 }
720
Jiri Pirko0ddf4772010-02-20 00:13:58 +0000721 be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
Sathya Perlaf31e50a2010-03-02 03:56:39 -0800722 &adapter->mc_cmd_mem);
Sathya Perla24307ee2009-06-18 00:09:25 +0000723done:
724 return;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700725}
726
/* .ndo_set_vf_mac handler: program 'mac' onto VF 'vf' by deleting any
 * previously programmed pmac and adding the new one; caches the MAC in
 * vf_cfg on success.  Returns 0, -EPERM (SR-IOV off), -EINVAL (bad MAC
 * or VF index), or the FW command status.
 * NOTE(review): the status of be_cmd_pmac_del() is immediately
 * overwritten by the pmac_add call — deletion failures are silently
 * ignored; confirm intended. */
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
		return -EINVAL;

	if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
		status = be_cmd_pmac_del(adapter,
				adapter->vf_cfg[vf].vf_if_handle,
				adapter->vf_cfg[vf].vf_pmac_id);

	status = be_cmd_pmac_add(adapter, mac,
			adapter->vf_cfg[vf].vf_if_handle,
			&adapter->vf_cfg[vf].vf_pmac_id);

	if (status)
		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
			mac, vf);
	else
		memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

	return status;
}
755
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000756static int be_get_vf_config(struct net_device *netdev, int vf,
757 struct ifla_vf_info *vi)
758{
759 struct be_adapter *adapter = netdev_priv(netdev);
760
761 if (!adapter->sriov_enabled)
762 return -EPERM;
763
764 if (vf >= num_vfs)
765 return -EINVAL;
766
767 vi->vf = vf;
Ajit Khapardee1d18732010-07-23 01:52:13 +0000768 vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
Ajit Khaparde1da87b72010-07-23 01:51:22 +0000769 vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
Ajit Khaparde64600ea2010-07-23 01:50:34 +0000770 vi->qos = 0;
771 memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
772
773 return 0;
774}
775
/* ndo_set_vf_vlan handler: assign (vlan != 0) or clear (vlan == 0) the
 * VLAN tag for VF @vf and push the updated VLAN table to hardware via
 * be_vid_config().  @qos is accepted but not used by this adapter.
 * Returns 0 on success, -EPERM if SR-IOV is disabled, -EINVAL for a bad
 * VF index or vlan id, else the be_vid_config() status.
 */
static int be_set_vf_vlan(struct net_device *netdev,
			int vf, u16 vlan, u8 qos)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	int status = 0;

	if (!adapter->sriov_enabled)
		return -EPERM;

	if ((vf >= num_vfs) || (vlan > 4095))
		return -EINVAL;

	if (vlan) {
		adapter->vf_cfg[vf].vf_vlan_tag = vlan;
		/* NOTE(review): vlans_added is bumped on every non-zero call;
		 * setting a new vlan on a VF that already has one appears to
		 * inflate the count - confirm callers cannot do that. */
		adapter->vlans_added++;
	} else {
		adapter->vf_cfg[vf].vf_vlan_tag = 0;
		adapter->vlans_added--;
	}

	status = be_vid_config(adapter, true, vf);

	if (status)
		dev_info(&adapter->pdev->dev,
				"VLAN %d config on VF %d failed\n", vlan, vf);
	return status;
}
803
Ajit Khapardee1d18732010-07-23 01:52:13 +0000804static int be_set_vf_tx_rate(struct net_device *netdev,
805 int vf, int rate)
806{
807 struct be_adapter *adapter = netdev_priv(netdev);
808 int status = 0;
809
810 if (!adapter->sriov_enabled)
811 return -EPERM;
812
813 if ((vf >= num_vfs) || (rate < 0))
814 return -EINVAL;
815
816 if (rate > 10000)
817 rate = 10000;
818
819 adapter->vf_cfg[vf].vf_tx_rate = rate;
820 status = be_cmd_set_qos(adapter, rate / 10, vf);
821
822 if (status)
823 dev_info(&adapter->pdev->dev,
824 "tx rate %d on VF %d failed\n", rate, vf);
825 return status;
826}
827
Sathya Perla4097f662009-03-24 16:40:13 -0700828static void be_rx_rate_update(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700829{
Sathya Perla4097f662009-03-24 16:40:13 -0700830 struct be_drvr_stats *stats = drvr_stats(adapter);
831 ulong now = jiffies;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700832
Sathya Perla4097f662009-03-24 16:40:13 -0700833 /* Wrapped around */
834 if (time_before(now, stats->be_rx_jiffies)) {
835 stats->be_rx_jiffies = now;
836 return;
837 }
838
839 /* Update the rate once in two seconds */
840 if ((now - stats->be_rx_jiffies) < 2 * HZ)
841 return;
842
Stephen Hemminger65f71b82009-03-27 00:25:24 -0700843 stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
844 - stats->be_rx_bytes_prev,
845 now - stats->be_rx_jiffies);
Sathya Perla4097f662009-03-24 16:40:13 -0700846 stats->be_rx_jiffies = now;
847 stats->be_rx_bytes_prev = stats->be_rx_bytes;
848}
849
850static void be_rx_stats_update(struct be_adapter *adapter,
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000851 u32 pktsize, u16 numfrags, u8 pkt_type)
Sathya Perla4097f662009-03-24 16:40:13 -0700852{
853 struct be_drvr_stats *stats = drvr_stats(adapter);
854
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700855 stats->be_rx_compl++;
856 stats->be_rx_frags += numfrags;
857 stats->be_rx_bytes += pktsize;
Ajit Khaparde91992e42010-02-19 13:57:12 +0000858 stats->be_rx_pkts++;
Ajit Khaparde1ef78ab2010-09-03 06:17:10 +0000859
860 if (pkt_type == BE_MULTICAST_PACKET)
861 stats->be_rx_mcast_pkt++;
Sathya Perla6b7c5b92009-03-11 23:32:03 -0700862}
863
Ajit Khaparde728a9972009-04-13 15:41:22 -0700864static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
865{
866 u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;
867
868 l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
869 ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
870 ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
871 if (ip_version) {
872 tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
873 udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
874 }
875 ipv6_chk = (ip_version && (tcpf || udpf));
876
877 return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
878}
879
/* Look up and consume the page-info entry for rx descriptor @frag_idx.
 * When this frag is the last user of its backing big page, the page is
 * DMA-unmapped here; the page reference itself is released by the
 * caller.  Also decrements the count of posted-but-unprocessed rx
 * descriptors.
 */
static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);	/* this frag must have been posted */

	/* Only the final frag carved out of a big page unmaps it */
	if (rx_page_info->last_page_user) {
		pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);
		rx_page_info->last_page_user = false;
	}

	atomic_dec(&rxq->used);
	return rx_page_info;
}
898
/* Throwaway the data in the Rx completion: walk every rx descriptor
 * referenced by @rxcp, drop the page reference, and clear the page-info
 * slot so the descriptor can be reposted.  Used on skb-alloc failure
 * and during queue cleanup.
 */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}
917
/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 *
 * The first BE_HDR_LEN bytes are copied into the skb's linear data
 * area; the remainder of the first frag and all further frags are
 * attached as page fragments.  Frags carved from the same physical big
 * page are coalesced into a single skb frag slot (only the first carve
 * holds the page reference; later carves drop theirs).  Finishes by
 * accounting the frame in the driver rx stats.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
			struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
			u16 num_rcvd)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;
	u8 pkt_type;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		/* Rest of the first frag stays in the page; attach it */
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	page_info->page = NULL;	/* ownership moved to the skb (or dropped) */

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			/* Same page as the previous frag; drop the extra ref */
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		page_info->page = NULL;
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd, pkt_type);
}
1003
/* Process the RX completion indicated by rxcp when GRO is disabled.
 * Allocates an skb, fills it from the rx descriptors, sets the checksum
 * state, and hands the frame to the stack (via the vlan path when the
 * completion carries a valid vlan tag).  Frames with a vlan tag but no
 * registered vlan group are dropped.
 */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u16 num_rcvd;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (unlikely(!skb)) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		/* No skb; release the posted buffers back to the queue */
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);

	/* Trust the hw checksum only when do_pkt_csum() says it is valid */
	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	if (unlikely(vlanf)) {
		if (!adapter->vlan_grp || adapter->vlans_added == 0) {
			/* Tagged frame but no vlan configured: drop it */
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = swab16(vid);	/* tag is big-endian in the compl */
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}
}
1056
/* Process the RX completion indicated by rxcp when GRO is enabled.
 * Attaches the rx page frags directly to the napi skb (no data copy)
 * and passes the frame up through napi_gro_frags()/vlan_gro_frags().
 * Frags carved from the same physical big page are coalesced into one
 * skb frag slot, as in skb_fill_rx_data().
 */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;
	u8 pkt_type;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	/* Is it a flush compl that has no data */
	if (unlikely(num_rcvd == 0))
		return;

	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
	pkt_type = AMAP_GET_BITS(struct amap_eth_rx_compl, cast_enc, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->function_mode & 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			/* Same page as the previous frag; drop the extra ref */
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* GRO path trusts hw csum */

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = swab16(vid);	/* tag is big-endian in the compl */

		/* NOTE(review): this early return leaves the frags attached
		 * to the napi skb and skips be_rx_stats_update() - looks like
		 * the dropped frame is never freed; confirm against later
		 * upstream fixes. */
		if (!adapter->vlan_grp || adapter->vlans_added == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd, pkt_type);
}
1137
/* Fetch the next valid rx completion from the rx CQ, or NULL if none.
 * The entry is byte-swapped in place and the CQ tail is advanced; the
 * caller must later clear the valid bit via be_rx_compl_reset().
 */
static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}
1151
/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}
1160
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001161static inline struct page *be_alloc_pages(u32 size)
1162{
1163 gfp_t alloc_flags = GFP_ATOMIC;
1164 u32 order = get_order(size);
1165 if (order > 0)
1166 alloc_flags |= __GFP_COMP;
1167 return alloc_pages(alloc_flags, order);
1168}
1169
/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 *
 * Each big page is DMA-mapped once and carved into rx_frag_size-sized
 * frags that share the page refcount (get_page() per extra frag).  The
 * frag that exhausts the page is marked last_page_user so the unmap
 * happens exactly once on the completion side.
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			/* Start a fresh big page: allocate and map it once */
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			/* Carve the next frag out of the current big page */
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}

		prev_page_info = page_info;
		queue_head_inc(rxq);
		page_info = &page_info_tbl[rxq->head];
	}
	/* Loop ended with the page partially used; last frag posted owns it */
	if (pagep)
		prev_page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}
}
1231
/* Fetch the next valid tx completion from @tx_cq, or NULL if none.
 * The entry is byte-swapped in place, its valid bit cleared, and the
 * CQ tail advanced.
 */
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	/* Read the rest of the compl only after the valid bit is seen set */
	rmb();
	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}
1247
/* Reclaim the wrbs of one transmitted skb whose completion reports
 * @last_index as its final wrb: unmap each data frag (the header wrb is
 * skipped; the first data wrb also unmaps the linear header when the
 * skb has headlen), advance the txq tail past the skb, and free it.
 */
static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
	bool unmap_skb_hdr = true;

	sent_skb = sent_skbs[txq->tail];
	BUG_ON(!sent_skb);
	sent_skbs[txq->tail] = NULL;

	/* skip header wrb */
	queue_tail_inc(txq);

	do {
		cur_index = txq->tail;
		wrb = queue_tail_node(txq);
		/* First data wrb also covers the skb's linear header */
		unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
					skb_headlen(sent_skb)));
		unmap_skb_hdr = false;

		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);

	kfree_skb(sent_skb);
}
1279
/* Fetch the next pending entry from the event queue, or NULL if none.
 * The evt word is converted to host order and the queue tail advanced;
 * the caller is responsible for zeroing evt to invalidate the entry.
 */
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	/* Read the entry only after the evt word is seen non-zero */
	rmb();
	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}
1292
/* Interrupt-side event handler: drain and invalidate all pending
 * entries on @eq_obj, re-arm the EQ, and schedule NAPI when any events
 * were found.  Returns the number of events drained.
 */
static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;	/* invalidate the entry for reuse */
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}
1313
1314/* Just read and notify events without processing them.
1315 * Used at the time of destroying event queues */
1316static void be_eq_clean(struct be_adapter *adapter,
1317 struct be_eq_obj *eq_obj)
1318{
1319 struct be_eq_entry *eqe;
1320 u16 num = 0;
1321
1322 while ((eqe = event_get(eq_obj)) != NULL) {
1323 eqe->evt = 0;
1324 num++;
1325 }
1326
1327 if (num)
1328 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1329}
1330
/* Drain the rx path during teardown: discard every pending rx
 * completion, then release all posted-but-unused rx buffers starting
 * from the oldest (tail) descriptor.
 */
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffer that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));	/* everything must be freed */
}
1355
/* Drain the tx path during teardown: reap tx completions for up to
 * 200ms, then forcibly reclaim any posted wrbs whose completions never
 * arrived so their skbs and DMA mappings are released.
 */
static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	bool dummy_wrb;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));

	/* free posted tx for which compls will never arrive */
	while (atomic_read(&txq->used)) {
		sent_skb = sent_skbs[txq->tail];
		end_idx = txq->tail;
		/* Derive the last wrb index from the skb's own wrb count */
		index_adv(&end_idx,
			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
		be_tx_compl_process(adapter, end_idx);
	}
}
1398
Sathya Perla5fb379e2009-06-18 00:02:59 +00001399static void be_mcc_queues_destroy(struct be_adapter *adapter)
1400{
1401 struct be_queue_info *q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001402
Sathya Perla8788fdc2009-07-27 22:52:03 +00001403 q = &adapter->mcc_obj.q;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001404 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001405 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001406 be_queue_free(adapter, q);
1407
Sathya Perla8788fdc2009-07-27 22:52:03 +00001408 q = &adapter->mcc_obj.cq;
Sathya Perla5fb379e2009-06-18 00:02:59 +00001409 if (q->created)
Sathya Perla8788fdc2009-07-27 22:52:03 +00001410 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
Sathya Perla5fb379e2009-06-18 00:02:59 +00001411 be_queue_free(adapter, q);
1412}
1413
/* Must be called only after TX qs are created as MCC shares TX EQ.
 * Allocates and creates the MCC completion queue (on the TX event
 * queue) and then the MCC queue itself, unwinding on any failure.
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

	/* Unwind in reverse order of what succeeded above */
mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}
1449
/* Tear down the tx queue set in reverse order of creation: tx queue,
 * tx completion queue, then (after draining residual events) the tx
 * event queue.
 */
static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1472
/* Create the tx queue set: tx event queue (fixed eqd, no adaptive
 * interrupt coalescing), tx completion queue on that EQ, and the tx
 * queue itself.  Also records the base EQ id used to map MSI-X vectors.
 * Unwinds on any failure.  Returns 0 on success, -1 on failure.
 */
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	/* Fixed interrupt delay for tx; adaptive coalescing disabled */
	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	adapter->base_eq_id = adapter->tx_eq.q.id;

	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

	/* Unwind in reverse order of what succeeded above */
tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}
1523
/* Tear down the RX queues in the reverse order of creation:
 * eth rx queue first, then its completion queue, then the event queue.
 */
static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);

		/* After the rxq is invalidated, wait for a grace time
		 * of 1ms for all dma to end and the flush compl to arrive
		 */
		mdelay(1);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}
1553
/* Create the RX event queue, completion queue and eth rx queue.
 * The RX EQ uses adaptive interrupt coalescing (AIC). On any failure
 * the queues created so far are torn down in reverse order.
 * Returns 0 on success or the error code of the failing step.
 */
static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	/* adaptive EQ delay: start at 0 and let AIC tune up to the max */
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001614
Sathya Perlab628bde2009-08-17 00:58:26 +00001615/* There are 8 evt ids per func. Retruns the evt id's bit number */
1616static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1617{
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00001618 return eq_id - adapter->base_eq_id;
Sathya Perlab628bde2009-08-17 00:58:26 +00001619}
1620
/* INTx (legacy, shared-line) interrupt handler.
 * Reads this function's interrupt-status register to check whether the
 * interrupt is really ours before servicing both event queues.
 */
static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	/* each CEV_ISR register covers 8 EQ ids; index by the tx EQ id */
	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		(adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;	/* not our interrupt */

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}
1636
1637static irqreturn_t be_msix_rx(int irq, void *dev)
1638{
1639 struct be_adapter *adapter = dev;
1640
Sathya Perla8788fdc2009-07-27 22:52:03 +00001641 event_handle(adapter, &adapter->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001642
1643 return IRQ_HANDLED;
1644}
1645
Sathya Perla5fb379e2009-06-18 00:02:59 +00001646static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001647{
1648 struct be_adapter *adapter = dev;
1649
Sathya Perla8788fdc2009-07-27 22:52:03 +00001650 event_handle(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001651
1652 return IRQ_HANDLED;
1653}
1654
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001655static inline bool do_gro(struct be_adapter *adapter,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001656 struct be_eth_rx_compl *rxcp)
1657{
1658 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1659 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1660
1661 if (err)
1662 drvr_stats(adapter)->be_rxcp_err++;
1663
Ajit Khaparde5be93b92009-07-21 12:36:19 -07001664 return (tcp_frame && !err) ? true : false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001665}
1666
/* NAPI poll handler for RX: processes up to @budget rx completions,
 * refills the rx queue when it runs below the watermark, and re-arms
 * the completion queue only when all outstanding work was consumed.
 * Returns the number of completions processed.
 */
int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	adapter->stats.drvr_stats.be_rx_polls++;
	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		/* error-free TCP frames go through GRO */
		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}
1704
/* As TX and MCC share the same EQ check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	int tx_compl = 0, mcc_compl, status = 0;
	u16 end_idx;

	/* reap every pending tx completion; frees the completed skbs */
	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
				wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		tx_compl++;
	}

	mcc_compl = be_process_mcc(adapter, &status);

	napi_complete(napi);

	/* re-arm the CQs for however many entries were consumed */
	if (mcc_compl) {
		struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
	}

	if (tx_compl) {
		be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += tx_compl;
	}

	/* budget is not honoured; always report the EQ fully serviced */
	return 1;
}
1752
Ajit Khaparde7c185272010-07-29 06:16:33 +00001753static inline bool be_detect_ue(struct be_adapter *adapter)
1754{
1755 u32 online0 = 0, online1 = 0;
1756
1757 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE0, &online0);
1758
1759 pci_read_config_dword(adapter->pdev, PCICFG_ONLINE1, &online1);
1760
1761 if (!online0 || !online1) {
1762 adapter->ue_detected = true;
1763 dev_err(&adapter->pdev->dev,
1764 "UE Detected!! online0=%d online1=%d\n",
1765 online0, online1);
1766 return true;
1767 }
1768
1769 return false;
1770}
1771
1772void be_dump_ue(struct be_adapter *adapter)
1773{
1774 u32 ue_status_lo, ue_status_hi, ue_status_lo_mask, ue_status_hi_mask;
1775 u32 i;
1776
1777 pci_read_config_dword(adapter->pdev,
1778 PCICFG_UE_STATUS_LOW, &ue_status_lo);
1779 pci_read_config_dword(adapter->pdev,
1780 PCICFG_UE_STATUS_HIGH, &ue_status_hi);
1781 pci_read_config_dword(adapter->pdev,
1782 PCICFG_UE_STATUS_LOW_MASK, &ue_status_lo_mask);
1783 pci_read_config_dword(adapter->pdev,
1784 PCICFG_UE_STATUS_HI_MASK, &ue_status_hi_mask);
1785
1786 ue_status_lo = (ue_status_lo & (~ue_status_lo_mask));
1787 ue_status_hi = (ue_status_hi & (~ue_status_hi_mask));
1788
1789 if (ue_status_lo) {
1790 for (i = 0; ue_status_lo; ue_status_lo >>= 1, i++) {
1791 if (ue_status_lo & 1)
1792 dev_err(&adapter->pdev->dev,
1793 "UE: %s bit set\n", ue_status_low_desc[i]);
1794 }
1795 }
1796 if (ue_status_hi) {
1797 for (i = 0; ue_status_hi; ue_status_hi >>= 1, i++) {
1798 if (ue_status_hi & 1)
1799 dev_err(&adapter->pdev->dev,
1800 "UE: %s bit set\n", ue_status_hi_desc[i]);
1801 }
1802 }
1803
1804}
1805
/* Periodic (1s) housekeeping: refresh hw stats, tune the RX EQ delay,
 * update rate stats, retry posting rx buffers if starved, and check
 * for unrecoverable hw errors. Re-arms itself at the end.
 */
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	/* don't fire a new stats cmd while the previous one is in flight */
	if (!adapter->stats_ioctl_sent)
		be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	/* retry posting rx frags if an earlier post was starved */
	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}
	/* dump the UE registers only once, when the UE is first seen */
	if (!adapter->ue_detected) {
		if (be_detect_ue(adapter))
			be_dump_ue(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}
1831
Sathya Perla8d56ff12009-11-22 22:02:26 +00001832static void be_msix_disable(struct be_adapter *adapter)
1833{
1834 if (adapter->msix_enabled) {
1835 pci_disable_msix(adapter->pdev);
1836 adapter->msix_enabled = false;
1837 }
1838}
1839
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001840static void be_msix_enable(struct be_adapter *adapter)
1841{
1842 int i, status;
1843
1844 for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
1845 adapter->msix_entries[i].entry = i;
1846
1847 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
1848 BE_NUM_MSIX_VECTORS);
1849 if (status == 0)
1850 adapter->msix_enabled = true;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001851}
1852
/* Detect whether this function is a PF or a VF; on a PF, enable SR-IOV
 * for num_vfs VFs (module parameter) when the kernel supports it.
 */
static void be_sriov_enable(struct be_adapter *adapter)
{
	be_check_sriov_fn_type(adapter);
#ifdef CONFIG_PCI_IOV
	if (be_physfn(adapter) && num_vfs) {
		int status;

		/* sriov_enabled is set only if pci_enable_sriov succeeded */
		status = pci_enable_sriov(adapter->pdev, num_vfs);
		adapter->sriov_enabled = status ? false : true;
	}
#endif
}
1865
/* Undo be_sriov_enable(): turn off SR-IOV if it was enabled */
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	if (!adapter->sriov_enabled)
		return;

	pci_disable_sriov(adapter->pdev);
	adapter->sriov_enabled = false;
#endif
}
1875
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001876static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1877{
Sathya Perlab628bde2009-08-17 00:58:26 +00001878 return adapter->msix_entries[
1879 be_evt_bit_get(adapter, eq_id)].vector;
1880}
1881
/* Request the MSI-x irq for the given event-queue object; the irq is
 * named "<netdev name>-<desc>". Returns request_irq()'s status.
 * NOTE(review): desc is built with sprintf; assumes eq_obj->desc is
 * sized for netdev->name (IFNAMSIZ) plus desc -- confirm in be.h.
 */
static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}
1893
1894static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
1895{
1896 int vec = be_msix_vec_get(adapter, eq_obj->q.id);
1897 free_irq(vec, adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001898}
1899
1900static int be_msix_register(struct be_adapter *adapter)
1901{
Sathya Perlab628bde2009-08-17 00:58:26 +00001902 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001903
Sathya Perlab628bde2009-08-17 00:58:26 +00001904 status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001905 if (status)
1906 goto err;
1907
Sathya Perlab628bde2009-08-17 00:58:26 +00001908 status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
1909 if (status)
1910 goto free_tx_irq;
1911
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001912 return 0;
Sathya Perlab628bde2009-08-17 00:58:26 +00001913
1914free_tx_irq:
1915 be_free_irq(adapter, &adapter->tx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001916err:
1917 dev_warn(&adapter->pdev->dev,
1918 "MSIX Request IRQ failed - err %d\n", status);
1919 pci_disable_msix(adapter->pdev);
1920 adapter->msix_enabled = false;
1921 return status;
1922}
1923
/* Register interrupt handlers: MSI-x when available, otherwise fall
 * back to shared INTx. VFs support only MSI-x, so a VF returns the
 * MSI-x error instead of falling back.
 */
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
		/* INTx is not supported for VF */
		if (!be_physfn(adapter))
			return status;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}
1951
1952static void be_irq_unregister(struct be_adapter *adapter)
1953{
1954 struct net_device *netdev = adapter->netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001955
1956 if (!adapter->isr_registered)
1957 return;
1958
1959 /* INTx */
1960 if (!adapter->msix_enabled) {
1961 free_irq(netdev->irq, adapter);
1962 goto done;
1963 }
1964
1965 /* MSIx */
Sathya Perlab628bde2009-08-17 00:58:26 +00001966 be_free_irq(adapter, &adapter->tx_eq);
1967 be_free_irq(adapter, &adapter->rx_eq);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001968done:
1969 adapter->isr_registered = false;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07001970}
1971
/* ndo_stop: quiesce the device -- stop the worker and async MCC
 * processing, stop the tx queue, disable and release interrupts,
 * stop NAPI, and drain pending tx completions so all skbs are freed.
 * Always returns 0.
 */
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	be_async_mcc_disable(adapter);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	/* wait for in-flight irq handlers before unregistering them */
	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}
2009
/* ndo_open: bring the interface up -- post rx buffers, enable NAPI,
 * register/enable interrupts, arm the event and completion queues,
 * start async MCC and the worker, query link state, and (on a PF)
 * apply vlan and flow-control config. On any error the device is
 * closed again and -EIO is returned.
 */
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	/* Now that interrupts are on we can process async mcc */
	be_async_mcc_enable(adapter);

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto err;
	be_link_status_update(adapter, link_up);

	/* vlan and flow-control configuration is a PF-only privilege */
	if (be_physfn(adapter)) {
		status = be_vid_config(adapter, false, 0);
		if (status)
			goto err;

		status = be_cmd_set_flow_control(adapter,
				adapter->tx_fc, adapter->rx_fc);
		if (status)
			goto err;
	}

	return 0;
err:
	be_close(adapter->netdev);
	return -EIO;
}
2064
/* Enable or disable Wake-on-magic-packet. Allocates a DMA buffer for
 * the fw cmd, programs the fw with the netdev's MAC (enable) or a
 * zeroed MAC (disable), and sets PCI wake state to match.
 * Returns 0 on success, -1 on allocation failure, or a fw/PCI error.
 */
static int be_setup_wol(struct be_adapter *adapter, bool enable)
{
	struct be_dma_mem cmd;
	int status = 0;
	u8 mac[ETH_ALEN];

	memset(mac, 0, ETH_ALEN);

	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (cmd.va == NULL)
		return -1;
	memset(cmd.va, 0, cmd.size);

	if (enable) {
		/* PM config must be written before arming the magic filter */
		status = pci_write_config_dword(adapter->pdev,
			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Could not enable Wake-on-lan\n");
			pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
					cmd.dma);
			return status;
		}
		status = be_cmd_enable_magic_wol(adapter,
				adapter->netdev->dev_addr, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
	} else {
		/* a zeroed MAC disables the magic-packet filter in fw */
		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
	}

	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
2102
Sathya Perla5fb379e2009-06-18 00:02:59 +00002103static int be_setup(struct be_adapter *adapter)
2104{
Sathya Perla5fb379e2009-06-18 00:02:59 +00002105 struct net_device *netdev = adapter->netdev;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002106 u32 cap_flags, en_flags, vf = 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002107 int status;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002108 u8 mac[ETH_ALEN];
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002109
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002110 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
2111
2112 if (be_physfn(adapter)) {
2113 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
2114 BE_IF_FLAGS_PROMISCUOUS |
2115 BE_IF_FLAGS_PASS_L3L4_ERRORS;
2116 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
2117 }
Sathya Perla73d540f2009-10-14 20:20:42 +00002118
2119 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2120 netdev->dev_addr, false/* pmac_invalid */,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002121 &adapter->if_handle, &adapter->pmac_id, 0);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002122 if (status != 0)
2123 goto do_none;
2124
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002125 if (be_physfn(adapter)) {
2126 while (vf < num_vfs) {
2127 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
2128 | BE_IF_FLAGS_BROADCAST;
2129 status = be_cmd_if_create(adapter, cap_flags, en_flags,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002130 mac, true,
2131 &adapter->vf_cfg[vf].vf_if_handle,
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002132 NULL, vf+1);
2133 if (status) {
2134 dev_err(&adapter->pdev->dev,
2135 "Interface Create failed for VF %d\n", vf);
2136 goto if_destroy;
2137 }
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002138 adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002139 vf++;
Sarveshwar Bandi84e5b9f2010-05-27 16:28:15 -07002140 }
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002141 } else if (!be_physfn(adapter)) {
2142 status = be_cmd_mac_addr_query(adapter, mac,
2143 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
2144 if (!status) {
2145 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2146 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2147 }
2148 }
2149
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002150 status = be_tx_queues_create(adapter);
2151 if (status != 0)
2152 goto if_destroy;
2153
2154 status = be_rx_queues_create(adapter);
2155 if (status != 0)
2156 goto tx_qs_destroy;
2157
Sathya Perla5fb379e2009-06-18 00:02:59 +00002158 status = be_mcc_queues_create(adapter);
2159 if (status != 0)
2160 goto rx_qs_destroy;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002161
Ajit Khaparde0dffc832009-11-29 17:57:46 +00002162 adapter->link_speed = -1;
2163
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002164 return 0;
2165
Sathya Perla5fb379e2009-06-18 00:02:59 +00002166rx_qs_destroy:
2167 be_rx_queues_destroy(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002168tx_qs_destroy:
2169 be_tx_queues_destroy(adapter);
2170if_destroy:
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002171 for (vf = 0; vf < num_vfs; vf++)
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002172 if (adapter->vf_cfg[vf].vf_if_handle)
2173 be_cmd_if_destroy(adapter,
2174 adapter->vf_cfg[vf].vf_if_handle);
Sathya Perla8788fdc2009-07-27 22:52:03 +00002175 be_cmd_if_destroy(adapter, adapter->if_handle);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002176do_none:
2177 return status;
2178}
2179
/* Undo be_setup(): destroy the queues in reverse creation order,
 * destroy the interface, and tell fw no more cmds will be issued.
 * Always returns 0.
 */
static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}
2192
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002193
/* Signature string expected at the start of a UFI firmware file */
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
/* Flash-directory cookie, stored as two 16-byte halves and compared as
 * raw bytes. Note: the second initializer exactly fills its 16-byte
 * slot, so no NUL terminator is stored (valid C for char arrays).
 */
char flash_cookie[2][16] = {"*** SE FLAS",
			"H DIRECTORY *** "};
Sarveshwar Bandifa9a6fe2009-11-20 14:23:47 -08002197
/* Decide whether the redboot section actually needs flashing: fetch
 * the CRC of the currently-flashed redboot image from the adapter and
 * compare it with the CRC stored in the last 4 bytes of the image in
 * the fw file. Returns true only when they differ (update needed);
 * returns false on a CRC-read failure so redboot is left untouched.
 */
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p, u32 img_start, int image_size,
			int hdr_size)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	/* the image's CRC occupies its final 4 bytes within the file */
	crc_offset = hdr_size + img_start + image_size - 4;

	p += crc_offset;

	status = be_cmd_get_flash_crc(adapter, flashed_crc,
			(image_size - 4));
	if (status) {
		dev_err(&adapter->pdev->dev,
		"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/*update redboot only if crc does not match*/
	if (!memcmp(flashed_crc, p, 4))
		return false;
	else
		return true;
}
2224
/* Write each firmware component from the UFI file to its flash region.
 * The component layout differs between BE2 (gen2) and BE3 (gen3)
 * ASICs, so the matching table is selected by adapter->generation.
 * Data is pushed in 32KB chunks via the DMA buffer in flash_cmd; the
 * final chunk of a component uses OPER_FLASH to commit, earlier ones
 * OPER_SAVE. Returns 0 on success, -1 on any failure.
 */
static int be_flash_data(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, int num_of_images)

{
	int status = 0, i, filehdr_size = 0;
	u32 total_bytes = 0, flash_op;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;
	struct flash_comp *pflashcomp;
	int num_comp;

	/* per component: {offset in file, component type, max size} */
	struct flash_comp gen3_flash_types[9] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g3},
		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
			FLASH_NCSI_IMAGE_MAX_SIZE_g3}
	};
	struct flash_comp gen2_flash_types[8] = {
		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
			FLASH_IMAGE_MAX_SIZE_g2},
		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
			FLASH_IMAGE_MAX_SIZE_g2}
	};

	if (adapter->generation == BE_GEN3) {
		pflashcomp = gen3_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g3);
		num_comp = 9;
	} else {
		pflashcomp = gen2_flash_types;
		filehdr_size = sizeof(struct flash_file_hdr_g2);
		num_comp = 8;
	}
	for (i = 0; i < num_comp; i++) {
		/* NCSI component is flashed only for fw >= 3.102.148.0 */
		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
			memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
			continue;
		/* skip redboot unless its CRC shows it actually changed */
		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
			(!be_flash_redboot(adapter, fw->data,
			pflashcomp[i].offset, pflashcomp[i].size,
			filehdr_size)))
			continue;
		/* locate this component's data within the fw file */
		p = fw->data;
		p += filehdr_size + pflashcomp[i].offset
			+ (num_of_images * sizeof(struct image_hdr));
		if (p + pflashcomp[i].size > fw->data + fw->size)
			return -1;	/* component overruns the file */
		total_bytes = pflashcomp[i].size;
		while (total_bytes) {
			if (total_bytes > 32*1024)
				num_bytes = 32*1024;
			else
				num_bytes = total_bytes;
			total_bytes -= num_bytes;

			/* last chunk commits; earlier chunks only stage */
			if (!total_bytes)
				flash_op = FLASHROM_OPER_FLASH;
			else
				flash_op = FLASHROM_OPER_SAVE;
			memcpy(req->params.data_buf, p, num_bytes);
			p += num_bytes;
			status = be_cmd_write_flashrom(adapter, flash_cmd,
				pflashcomp[i].optype, flash_op, num_bytes);
			if (status) {
				dev_err(&adapter->pdev->dev,
					"cmd to write to flash rom failed.\n");
				return -1;
			}
			yield();
		}
	}
	return 0;
}
2326
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002327static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2328{
2329 if (fhdr == NULL)
2330 return 0;
2331 if (fhdr->build[0] == '3')
2332 return BE_GEN3;
2333 else if (fhdr->build[0] == '2')
2334 return BE_GEN2;
2335 else
2336 return 0;
2337}
2338
Ajit Khaparde84517482009-09-04 03:12:16 +00002339int be_load_fw(struct be_adapter *adapter, u8 *func)
2340{
2341 char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
2342 const struct firmware *fw;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002343 struct flash_file_hdr_g2 *fhdr;
2344 struct flash_file_hdr_g3 *fhdr3;
2345 struct image_hdr *img_hdr_ptr = NULL;
Ajit Khaparde84517482009-09-04 03:12:16 +00002346 struct be_dma_mem flash_cmd;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002347 int status, i = 0, num_imgs = 0;
Ajit Khaparde84517482009-09-04 03:12:16 +00002348 const u8 *p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002349
Ajit Khaparde84517482009-09-04 03:12:16 +00002350 strcpy(fw_file, func);
2351
2352 status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
2353 if (status)
2354 goto fw_exit;
2355
2356 p = fw->data;
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002357 fhdr = (struct flash_file_hdr_g2 *) p;
Ajit Khaparde84517482009-09-04 03:12:16 +00002358 dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
2359
Ajit Khaparde84517482009-09-04 03:12:16 +00002360 flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
2361 flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
2362 &flash_cmd.dma);
2363 if (!flash_cmd.va) {
2364 status = -ENOMEM;
2365 dev_err(&adapter->pdev->dev,
2366 "Memory allocation failure while flashing\n");
2367 goto fw_exit;
2368 }
2369
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002370 if ((adapter->generation == BE_GEN3) &&
2371 (get_ufigen_type(fhdr) == BE_GEN3)) {
2372 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002373 num_imgs = le32_to_cpu(fhdr3->num_imgs);
2374 for (i = 0; i < num_imgs; i++) {
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002375 img_hdr_ptr = (struct image_hdr *) (fw->data +
2376 (sizeof(struct flash_file_hdr_g3) +
Ajit Khaparde8b93b712010-03-31 01:57:10 +00002377 i * sizeof(struct image_hdr)));
2378 if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
2379 status = be_flash_data(adapter, fw, &flash_cmd,
2380 num_imgs);
Ajit Khaparde3f0d4562010-02-09 01:30:35 +00002381 }
2382 } else if ((adapter->generation == BE_GEN2) &&
2383 (get_ufigen_type(fhdr) == BE_GEN2)) {
2384 status = be_flash_data(adapter, fw, &flash_cmd, 0);
2385 } else {
2386 dev_err(&adapter->pdev->dev,
2387 "UFI and Interface are not compatible for flashing\n");
2388 status = -1;
Ajit Khaparde84517482009-09-04 03:12:16 +00002389 }
2390
2391 pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
2392 flash_cmd.dma);
2393 if (status) {
2394 dev_err(&adapter->pdev->dev, "Firmware load error\n");
2395 goto fw_exit;
2396 }
2397
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02002398 dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
Ajit Khaparde84517482009-09-04 03:12:16 +00002399
2400fw_exit:
2401 release_firmware(fw);
2402 return status;
2403}
2404
/*
 * net_device callbacks for this driver, including the SR-IOV hooks
 * that let the PF manage per-VF MAC address, VLAN, tx rate and config.
 * NOTE(review): nothing visible in this file modifies the table at
 * runtime; if that holds file-wide it could be declared const.
 */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002405static struct net_device_ops be_netdev_ops = {
2406 .ndo_open = be_open,
2407 .ndo_stop = be_close,
2408 .ndo_start_xmit = be_xmit,
2409 .ndo_get_stats = be_get_stats,
2410 .ndo_set_rx_mode = be_set_multicast_list,
2411 .ndo_set_mac_address = be_mac_addr_set,
2412 .ndo_change_mtu = be_change_mtu,
2413 .ndo_validate_addr = eth_validate_addr,
2414 .ndo_vlan_rx_register = be_vlan_register,
2415 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2416 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002417 .ndo_set_vf_mac = be_set_vf_mac,
Ajit Khaparde1da87b72010-07-23 01:51:22 +00002418 .ndo_set_vf_vlan = be_set_vf_vlan,
Ajit Khapardee1d18732010-07-23 01:52:13 +00002419 .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
Ajit Khaparde64600ea2010-07-23 01:50:34 +00002420 .ndo_get_vf_config = be_get_vf_config
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002421};
2422
2423static void be_netdev_init(struct net_device *netdev)
2424{
2425 struct be_adapter *adapter = netdev_priv(netdev);
2426
2427 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
Ajit Khaparde583e3f32009-10-05 02:22:19 +00002428 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
Ajit Khaparde49e4b842010-06-14 04:56:07 +00002429 NETIF_F_GRO | NETIF_F_TSO6;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002430
Ajit Khaparde51c59872009-11-29 17:54:54 +00002431 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
2432
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002433 netdev->flags |= IFF_MULTICAST;
2434
Ajit Khaparde728a9972009-04-13 15:41:22 -07002435 adapter->rx_csum = true;
2436
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002437 /* Default settings for Rx and Tx flow control */
2438 adapter->rx_fc = true;
2439 adapter->tx_fc = true;
2440
Ajit Khapardec190e3c2009-09-04 03:12:29 +00002441 netif_set_gso_max_size(netdev, 65535);
2442
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002443 BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
2444
2445 SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
2446
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002447 netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
2448 BE_NAPI_WEIGHT);
Sathya Perla5fb379e2009-06-18 00:02:59 +00002449 netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002450 BE_NAPI_WEIGHT);
2451
2452 netif_carrier_off(netdev);
2453 netif_stop_queue(netdev);
2454}
2455
/*
 * Release whatever BAR mappings be_map_pci_bars() established.  Each
 * pointer is guarded because a failed/partial map leaves some NULL.
 * A VF's pcicfg is just an offset into the db mapping rather than its
 * own ioremap, hence the be_physfn() check before unmapping it.
 */
2456static void be_unmap_pci_bars(struct be_adapter *adapter)
2457{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002458 if (adapter->csr)
2459 iounmap(adapter->csr);
2460 if (adapter->db)
2461 iounmap(adapter->db);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002462 if (adapter->pcicfg && be_physfn(adapter))
Sathya Perla8788fdc2009-07-27 22:52:03 +00002463 iounmap(adapter->pcicfg);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002464}
2465
2466static int be_map_pci_bars(struct be_adapter *adapter)
2467{
2468 u8 __iomem *addr;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002469 int pcicfg_reg, db_reg;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002470
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002471 if (be_physfn(adapter)) {
2472 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2473 pci_resource_len(adapter->pdev, 2));
2474 if (addr == NULL)
2475 return -ENOMEM;
2476 adapter->csr = addr;
2477 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002478
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002479 if (adapter->generation == BE_GEN2) {
2480 pcicfg_reg = 1;
2481 db_reg = 4;
2482 } else {
2483 pcicfg_reg = 0;
2484 if (be_physfn(adapter))
2485 db_reg = 4;
2486 else
2487 db_reg = 0;
2488 }
2489 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2490 pci_resource_len(adapter->pdev, db_reg));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002491 if (addr == NULL)
2492 goto pci_map_err;
Sathya Perla8788fdc2009-07-27 22:52:03 +00002493 adapter->db = addr;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002494
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002495 if (be_physfn(adapter)) {
2496 addr = ioremap_nocache(
2497 pci_resource_start(adapter->pdev, pcicfg_reg),
2498 pci_resource_len(adapter->pdev, pcicfg_reg));
2499 if (addr == NULL)
2500 goto pci_map_err;
2501 adapter->pcicfg = addr;
2502 } else
2503 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002504
2505 return 0;
2506pci_map_err:
2507 be_unmap_pci_bars(adapter);
2508 return -ENOMEM;
2509}
2510
2511
2512static void be_ctrl_cleanup(struct be_adapter *adapter)
2513{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002514 struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002515
2516 be_unmap_pci_bars(adapter);
2517
2518 if (mem->va)
2519 pci_free_consistent(adapter->pdev, mem->size,
2520 mem->va, mem->dma);
Sathya Perlae7b909a2009-11-22 22:01:10 +00002521
2522 mem = &adapter->mc_cmd_mem;
2523 if (mem->va)
2524 pci_free_consistent(adapter->pdev, mem->size,
2525 mem->va, mem->dma);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002526}
2527
/*
 * Map the PCI BARs and allocate the DMA memory used for the firmware
 * mailbox and the multicast-config command, then initialise the locks
 * serialising mailbox/MCC access.  Uses goto-based unwind: each later
 * failure releases everything acquired before it.  Returns 0 or a
 * negative errno.
 */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002528static int be_ctrl_init(struct be_adapter *adapter)
2529{
Sathya Perla8788fdc2009-07-27 22:52:03 +00002530 struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
2531 struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002532 struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002533 int status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002534
2535 status = be_map_pci_bars(adapter);
2536 if (status)
Sathya Perlae7b909a2009-11-22 22:01:10 +00002537 goto done;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002538
	/* Over-allocate by 16 bytes so both the CPU and the DMA address of
	 * the mailbox can be aligned to a 16-byte boundary by hand below. */
2539 mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
2540 mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
2541 mbox_mem_alloc->size, &mbox_mem_alloc->dma);
2542 if (!mbox_mem_alloc->va) {
Sathya Perlae7b909a2009-11-22 22:01:10 +00002543 status = -ENOMEM;
2544 goto unmap_pci_bars;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002545 }
Sathya Perlae7b909a2009-11-22 22:01:10 +00002546
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002547 mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
2548 mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
2549 mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
2550 memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
Sathya Perlae7b909a2009-11-22 22:01:10 +00002551
	/* separate DMA buffer for the multicast MAC config command */
2552 mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
2553 mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
2554 &mc_cmd_mem->dma);
2555 if (mc_cmd_mem->va == NULL) {
2556 status = -ENOMEM;
2557 goto free_mbox;
2558 }
2559 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2560
Sathya Perla8788fdc2009-07-27 22:52:03 +00002561 spin_lock_init(&adapter->mbox_lock);
2562 spin_lock_init(&adapter->mcc_lock);
2563 spin_lock_init(&adapter->mcc_cq_lock);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002564
Sarveshwar Bandidd131e72010-05-25 16:16:32 -07002565 init_completion(&adapter->flash_compl);
	/* snapshot config space so error-recovery paths can restore it */
Sathya Perlacf588472010-02-14 21:22:01 +00002566 pci_save_state(adapter->pdev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002567 return 0;
Sathya Perlae7b909a2009-11-22 22:01:10 +00002568
2569free_mbox:
2570 pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
2571 mbox_mem_alloc->va, mbox_mem_alloc->dma);
2572
2573unmap_pci_bars:
2574 be_unmap_pci_bars(adapter);
2575
2576done:
2577 return status;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002578}
2579
2580static void be_stats_cleanup(struct be_adapter *adapter)
2581{
2582 struct be_stats_obj *stats = &adapter->stats;
2583 struct be_dma_mem *cmd = &stats->cmd;
2584
2585 if (cmd->va)
2586 pci_free_consistent(adapter->pdev, cmd->size,
2587 cmd->va, cmd->dma);
2588}
2589
2590static int be_stats_init(struct be_adapter *adapter)
2591{
2592 struct be_stats_obj *stats = &adapter->stats;
2593 struct be_dma_mem *cmd = &stats->cmd;
2594
2595 cmd->size = sizeof(struct be_cmd_req_get_stats);
2596 cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
2597 if (cmd->va == NULL)
2598 return -1;
David S. Millerd291b9a2010-01-28 21:36:21 -08002599 memset(cmd->va, 0, cmd->size);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002600 return 0;
2601}
2602
/*
 * PCI remove callback: tear down in roughly the reverse order of
 * be_probe().  free_netdev() must come last because `adapter` is the
 * net_device's private area and lives inside that allocation.
 */
2603static void __devexit be_remove(struct pci_dev *pdev)
2604{
2605 struct be_adapter *adapter = pci_get_drvdata(pdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00002606
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002607 if (!adapter)
2608 return;
2609
2610 unregister_netdev(adapter->netdev);
2611
Sathya Perla5fb379e2009-06-18 00:02:59 +00002612 be_clear(adapter);
2613
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002614 be_stats_cleanup(adapter);
2615
2616 be_ctrl_cleanup(adapter);
2617
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002618 be_sriov_disable(adapter);
2619
Sathya Perla8d56ff12009-11-22 22:02:26 +00002620 be_msix_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002621
2622 pci_set_drvdata(pdev, NULL);
2623 pci_release_regions(pdev);
2624 pci_disable_device(pdev);
2625
2626 free_netdev(adapter->netdev);
2627}
2628
/*
 * Read firmware-owned configuration into the adapter: firmware
 * version, port number / function mode, and (physical function only)
 * the permanent MAC address, which is copied into the netdev.
 * Returns 0 or the status of the first failing firmware command.
 */
Sathya Perla2243e2e2009-11-22 22:02:03 +00002629static int be_get_config(struct be_adapter *adapter)
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002630{
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002631 int status;
Sathya Perla2243e2e2009-11-22 22:02:03 +00002632 u8 mac[ETH_ALEN];
Sathya Perla43a04fdc2009-10-14 20:21:17 +00002633
Sathya Perla8788fdc2009-07-27 22:52:03 +00002634 status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002635 if (status)
2636 return status;
2637
Ajit Khapardedcb9b562009-09-30 21:58:22 -07002638 status = be_cmd_query_fw_cfg(adapter,
Ajit Khaparde3486be22010-07-23 02:04:54 +00002639 &adapter->port_num, &adapter->function_mode);
Sathya Perla2243e2e2009-11-22 22:02:03 +00002640 if (status)
2641 return status;
2642
2643 memset(mac, 0, ETH_ALEN);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002644
	/* only the physical function queries the permanent MAC here */
2645 if (be_physfn(adapter)) {
2646 status = be_cmd_mac_addr_query(adapter, mac,
Sathya Perla2243e2e2009-11-22 22:02:03 +00002647 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
Ajit Khapardeca9e4982009-11-29 17:56:26 +00002648
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002649 if (status)
2650 return status;
Ajit Khapardeca9e4982009-11-29 17:56:26 +00002651
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002652 if (!is_valid_ether_addr(mac))
2653 return -EADDRNOTAVAIL;
2654
2655 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2656 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2657 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00002658
	/* NOTE(review): 0x400 appears to flag a mode in which the VLAN
	 * table is shared (quarter allocation per function) -- confirm
	 * which function_mode bit this is and replace with a named macro. */
Ajit Khaparde3486be22010-07-23 02:04:54 +00002659 if (adapter->function_mode & 0x400)
Ajit Khaparde82903e42010-02-09 01:34:57 +00002660 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
2661 else
2662 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2663
Sathya Perla2243e2e2009-11-22 22:02:03 +00002664 return 0;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002665}
2666
/*
 * PCI probe: bring up one BladeEngine function.  Enables the device,
 * allocates the net_device, maps BARs, syncs with firmware, performs
 * per-function setup and finally registers the netdev.  Every error
 * path unwinds acquired resources in reverse via the labels at the
 * bottom.  Returns 0 or a negative errno.
 */
2667static int __devinit be_probe(struct pci_dev *pdev,
2668 const struct pci_device_id *pdev_id)
2669{
2670 int status = 0;
2671 struct be_adapter *adapter;
2672 struct net_device *netdev;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002673
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002674
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002675 status = pci_enable_device(pdev);
2676 if (status)
2677 goto do_none;
2678
2679 status = pci_request_regions(pdev, DRV_NAME);
2680 if (status)
2681 goto disable_dev;
2682 pci_set_master(pdev);
2683
2684 netdev = alloc_etherdev(sizeof(struct be_adapter));
2685 if (netdev == NULL) {
2686 status = -ENOMEM;
2687 goto rel_reg;
2688 }
2689 adapter = netdev_priv(netdev);
Ajit Khaparde7b139c82010-01-27 21:56:44 +00002690
	/* BAR layout and capabilities differ by ASIC generation */
2691 switch (pdev->device) {
2692 case BE_DEVICE_ID1:
2693 case OC_DEVICE_ID1:
2694 adapter->generation = BE_GEN2;
2695 break;
2696 case BE_DEVICE_ID2:
2697 case OC_DEVICE_ID2:
2698 adapter->generation = BE_GEN3;
2699 break;
2700 default:
2701 adapter->generation = 0;
2702 }
2703
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002704 adapter->pdev = pdev;
2705 pci_set_drvdata(pdev, adapter);
2706 adapter->netdev = netdev;
Sathya Perla2243e2e2009-11-22 22:02:03 +00002707 be_netdev_init(netdev);
2708 SET_NETDEV_DEV(netdev, &pdev->dev);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002709
2710 be_msix_enable(adapter);
2711
	/* prefer 64-bit DMA; fall back to a 32-bit mask */
Yang Hongyange9304382009-04-13 14:40:14 -07002712 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002713 if (!status) {
2714 netdev->features |= NETIF_F_HIGHDMA;
2715 } else {
Yang Hongyange9304382009-04-13 14:40:14 -07002716 status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002717 if (status) {
2718 dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
2719 goto free_netdev;
2720 }
2721 }
2722
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002723 be_sriov_enable(adapter);
2724
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002725 status = be_ctrl_init(adapter);
2726 if (status)
2727 goto free_netdev;
2728
	/* sync up with fw's ready state; only the PF polls POST */
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002730 if (be_physfn(adapter)) {
2731 status = be_cmd_POST(adapter);
2732 if (status)
2733 goto ctrl_clean;
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002734 }
Sathya Perla2243e2e2009-11-22 22:02:03 +00002735
2736 /* tell fw we're ready to fire cmds */
2737 status = be_cmd_fw_init(adapter);
2738 if (status)
2739 goto ctrl_clean;
2740
	/* clean function-level state left over from a previous driver */
Sarveshwar Bandi556ae192010-05-24 18:38:25 -07002741 if (be_physfn(adapter)) {
2742 status = be_cmd_reset_function(adapter);
2743 if (status)
2744 goto ctrl_clean;
2745 }
2746
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002747 status = be_stats_init(adapter);
2748 if (status)
2749 goto ctrl_clean;
2750
Sathya Perla2243e2e2009-11-22 22:02:03 +00002751 status = be_get_config(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002752 if (status)
2753 goto stats_clean;
2754
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002755 INIT_DELAYED_WORK(&adapter->work, be_worker);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002756
Sathya Perla5fb379e2009-06-18 00:02:59 +00002757 status = be_setup(adapter);
2758 if (status)
2759 goto stats_clean;
Sathya Perla2243e2e2009-11-22 22:02:03 +00002760
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002761 status = register_netdev(netdev);
2762 if (status != 0)
Sathya Perla5fb379e2009-06-18 00:02:59 +00002763 goto unsetup;
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002764
Ajit Khapardec4ca2372009-05-18 15:38:55 -07002765 dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002766 return 0;
2767
Sathya Perla5fb379e2009-06-18 00:02:59 +00002768unsetup:
2769 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002770stats_clean:
2771 be_stats_cleanup(adapter);
2772ctrl_clean:
2773 be_ctrl_cleanup(adapter);
2774free_netdev:
Sathya Perla8d56ff12009-11-22 22:02:26 +00002775 be_msix_disable(adapter);
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002776 be_sriov_disable(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002777 free_netdev(adapter->netdev);
Sathya Perla8d56ff12009-11-22 22:02:26 +00002778 pci_set_drvdata(pdev, NULL);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002779rel_reg:
2780 pci_release_regions(pdev);
2781disable_dev:
2782 pci_disable_device(pdev);
2783do_none:
Ajit Khapardec4ca2372009-05-18 15:38:55 -07002784 dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002785 return status;
2786}
2787
/*
 * Legacy PM suspend hook: arm wake-on-LAN if the user enabled it,
 * detach and close the interface, record the current flow-control
 * setting in the adapter (presumably re-applied by be_setup() on
 * resume -- confirm), tear down function state and power down.
 */
2788static int be_suspend(struct pci_dev *pdev, pm_message_t state)
2789{
2790 struct be_adapter *adapter = pci_get_drvdata(pdev);
2791 struct net_device *netdev = adapter->netdev;
2792
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002793 if (adapter->wol)
2794 be_setup_wol(adapter, true);
2795
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002796 netif_device_detach(netdev);
2797 if (netif_running(netdev)) {
2798 rtnl_lock();
2799 be_close(netdev);
2800 rtnl_unlock();
2801 }
Ajit Khaparde9e90c962009-11-06 02:06:59 +00002802 be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00002803 be_clear(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002804
2805 pci_save_state(pdev);
2806 pci_disable_device(pdev);
2807 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2808 return 0;
2809}
2810
/*
 * Legacy PM resume hook: power the device back up, re-init the
 * firmware command interface, rebuild function state and reopen the
 * interface if it was running before suspend.
 */
2811static int be_resume(struct pci_dev *pdev)
2812{
2813 int status = 0;
2814 struct be_adapter *adapter = pci_get_drvdata(pdev);
2815 struct net_device *netdev = adapter->netdev;
2816
2817 netif_device_detach(netdev);
2818
2819 status = pci_enable_device(pdev);
2820 if (status)
2821 return status;
2822
2823 pci_set_power_state(pdev, 0);
2824 pci_restore_state(pdev);
2825
Sathya Perla2243e2e2009-11-22 22:02:03 +00002826 /* tell fw we're ready to fire cmds */
2827 status = be_cmd_fw_init(adapter);
2828 if (status)
2829 return status;
2830
	/* NOTE(review): be_setup()'s return value is ignored, so a failed
	 * resume still reports success -- consider checking it. */
Sarveshwar Bandi9b0365f2009-08-12 21:01:29 +00002831 be_setup(adapter);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002832 if (netif_running(netdev)) {
2833 rtnl_lock();
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002834 be_open(netdev);
2835 rtnl_unlock();
2836 }
2837 netif_device_attach(netdev);
Ajit Khaparde71d8d1b2009-12-03 06:16:59 +00002838
	/* disarm the WoL config armed on suspend */
2839 if (adapter->wol)
2840 be_setup_wol(adapter, false);
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002841 return 0;
2842}
2843
/*
 * Shutdown hook.  A function-level reset (FLR) will stop BE from
 * DMAing any data, so issue one before powering off; wake-on-LAN is
 * then armed if the user enabled it.
 */
2847static void be_shutdown(struct pci_dev *pdev)
2848{
2849 struct be_adapter *adapter = pci_get_drvdata(pdev);
2850 struct net_device *netdev = adapter->netdev;
2851
2852 netif_device_detach(netdev);
2853
2854 be_cmd_reset_function(adapter);
2855
2856 if (adapter->wol)
2857 be_setup_wol(adapter, true);
2858
2859 pci_disable_device(pdev);
Sathya Perla82456b02010-02-17 01:35:37 +00002860}
2861
/*
 * PCI error (EEH/AER) callback: flag the error, quiesce the interface
 * and tell the PCI core how to proceed.  On a permanent failure the
 * device is gone, so request disconnect without touching it further;
 * otherwise disable it and ask for a slot reset.
 */
Sathya Perlacf588472010-02-14 21:22:01 +00002862static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
2863 pci_channel_state_t state)
2864{
2865 struct be_adapter *adapter = pci_get_drvdata(pdev);
2866 struct net_device *netdev = adapter->netdev;
2867
2868 dev_err(&adapter->pdev->dev, "EEH error detected\n");
2869
2870 adapter->eeh_err = true;
2871
2872 netif_device_detach(netdev);
2873
2874 if (netif_running(netdev)) {
2875 rtnl_lock();
2876 be_close(netdev);
2877 rtnl_unlock();
2878 }
2879 be_clear(adapter);
2880
2881 if (state == pci_channel_io_perm_failure)
2882 return PCI_ERS_RESULT_DISCONNECT;
2883
2884 pci_disable_device(pdev);
2885
2886 return PCI_ERS_RESULT_NEED_RESET;
2887}
2888
/*
 * EEH slot-reset callback: re-enable and restore the function after
 * the platform reset the slot, then poll POST to confirm the card and
 * firmware are alive before declaring the slot recovered.
 */
2889static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
2890{
2891 struct be_adapter *adapter = pci_get_drvdata(pdev);
2892 int status;
2893
2894 dev_info(&adapter->pdev->dev, "EEH reset\n");
2895 adapter->eeh_err = false;
2896
2897 status = pci_enable_device(pdev);
2898 if (status)
2899 return PCI_ERS_RESULT_DISCONNECT;
2900
2901 pci_set_master(pdev);
2902 pci_set_power_state(pdev, 0);
2903 pci_restore_state(pdev);
2904
2905 /* Check if card is ok and fw is ready */
2906 status = be_cmd_POST(adapter);
2907 if (status)
2908 return PCI_ERS_RESULT_DISCONNECT;
2909
2910 return PCI_ERS_RESULT_RECOVERED;
2911}
2912
/*
 * EEH resume callback: final recovery stage.  Re-save config space,
 * re-init the firmware command interface, rebuild function state and
 * reopen the interface if it was running.  On failure only a message
 * is logged and the netdev stays detached.
 */
2913static void be_eeh_resume(struct pci_dev *pdev)
2914{
2915 int status = 0;
2916 struct be_adapter *adapter = pci_get_drvdata(pdev);
2917 struct net_device *netdev = adapter->netdev;
2918
2919 dev_info(&adapter->pdev->dev, "EEH resume\n");
2920
2921 pci_save_state(pdev);
2922
2923 /* tell fw we're ready to fire cmds */
2924 status = be_cmd_fw_init(adapter);
2925 if (status)
2926 goto err;
2927
2928 status = be_setup(adapter);
2929 if (status)
2930 goto err;
2931
2932 if (netif_running(netdev)) {
2933 status = be_open(netdev);
2934 if (status)
2935 goto err;
2936 }
2937 netif_device_attach(netdev);
2938 return;
2939err:
2940 dev_err(&adapter->pdev->dev, "EEH resume failed\n");
Sathya Perlacf588472010-02-14 21:22:01 +00002941}
2942
/* PCI error-recovery (EEH/AER) entry points for this driver */
2943static struct pci_error_handlers be_eeh_handlers = {
2944 .error_detected = be_eeh_err_detected,
2945 .slot_reset = be_eeh_reset,
2946 .resume = be_eeh_resume,
2947};
2948
/* PCI driver glue: probe/remove, legacy PM hooks, shutdown and EEH */
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002949static struct pci_driver be_driver = {
2950 .name = DRV_NAME,
2951 .id_table = be_dev_ids,
2952 .probe = be_probe,
2953 .remove = be_remove,
2954 .suspend = be_suspend,
Sathya Perlacf588472010-02-14 21:22:01 +00002955 .resume = be_resume,
Sathya Perla82456b02010-02-17 01:35:37 +00002956 .shutdown = be_shutdown,
Sathya Perlacf588472010-02-14 21:22:01 +00002957 .err_handler = &be_eeh_handlers
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002958};
2959
2960static int __init be_init_module(void)
2961{
Joe Perches8e95a202009-12-03 07:58:21 +00002962 if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
2963 rx_frag_size != 2048) {
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002964 printk(KERN_WARNING DRV_NAME
2965 " : Module param rx_frag_size must be 2048/4096/8192."
2966 " Using 2048\n");
2967 rx_frag_size = 2048;
2968 }
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002969
Sarveshwar Bandiba343c72010-03-31 02:56:12 +00002970 if (num_vfs > 32) {
2971 printk(KERN_WARNING DRV_NAME
2972 " : Module param num_vfs must not be greater than 32."
2973 "Using 32\n");
2974 num_vfs = 32;
2975 }
2976
Sathya Perla6b7c5b92009-03-11 23:32:03 -07002977 return pci_register_driver(&be_driver);
2978}
2979module_init(be_init_module);
2980
/* Module teardown: unregister the driver, detaching all devices. */
2981static void __exit be_exit_module(void)
2982{
2983 pci_unregister_driver(&be_driver);
2984}
2985module_exit(be_exit_module);