/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */

#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int rx_frag_size = 2048;
module_param(rx_frag_size, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

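/* Alloc/free the DMA-coherent memory backing a queue ring; the ring is
 * sized as len * entry_size and zeroed on alloc.
 */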
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_dma_mem *mem = &q->dma_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
		u16 len, u16 entry_size)
{
	struct be_dma_mem *mem = &q->dma_mem;

	memset(q, 0, sizeof(*q));
	q->len = len;
	q->entry_size = entry_size;
	mem->size = len * entry_size;
	mem->va = pci_alloc_consistent(adapter->pdev, mem->size, &mem->dma);
	if (!mem->va)
		return -1;
	memset(mem->va, 0, mem->size);
	return 0;
}

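/* Enable or disable host interrupts by toggling the HOSTINTR bit in the
 * membar control register; the write is skipped when the bit already
 * matches the requested state.
 */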
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
	u8 __iomem *addr = adapter->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (!enabled && enable)
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else if (enabled && !enable)
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	else
		return;

	iowrite32(reg, addr);
}

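/* Doorbell helpers: each packs the ring id into the low bits and the
 * count of entries posted/popped (plus arm/clear flags for EQs and CQs)
 * into the upper bits of a single 32-bit doorbell write.
 */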
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_RQ_RING_ID_MASK;
	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
	u32 val = 0;
	val |= qid & DB_TXULP_RING_ID_MASK;
	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
		bool arm, bool clear_int, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_EQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clear_int)
		val |= 1 << DB_EQ_CLR_SHIFT;
	val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
	u32 val = 0;
	val |= qid & DB_CQ_RING_ID_MASK;
	if (arm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

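/* ndo_set_mac_address handler: replaces the programmed pmac entry via
 * FW cmds and updates netdev->dev_addr only if the add succeeds.
 */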
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int status = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
	if (status)
		return status;

	status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
			adapter->if_handle, &adapter->pmac_id);
	if (!status)
		memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);

	return status;
}

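/* Fold the HW port/rxf/erx stats from the last GET_STATS response into
 * the netdev stats structure.
 */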
void netdev_stats_update(struct be_adapter *adapter)
{
	struct be_hw_stats *hw_stats = hw_stats_from_cmd(adapter->stats.cmd.va);
	struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
	struct be_port_rxf_stats *port_stats =
			&rxf_stats->port[adapter->port_num];
	struct net_device_stats *dev_stats = &adapter->netdev->stats;
	struct be_erx_stats *erx_stats = &hw_stats->erx;

	dev_stats->rx_packets = port_stats->rx_total_frames;
	dev_stats->tx_packets = port_stats->tx_unicastframes +
		port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
	dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
				(u64) port_stats->rx_bytes_lsd;
	dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
				(u64) port_stats->tx_bytes_lsd;

	/* bad pkts received */
	dev_stats->rx_errors = port_stats->rx_crc_errors +
		port_stats->rx_alignment_symbol_errors +
		port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long +
		port_stats->rx_dropped_too_small +
		port_stats->rx_dropped_too_short +
		port_stats->rx_dropped_header_too_small +
		port_stats->rx_dropped_tcp_length +
		port_stats->rx_dropped_runt +
		port_stats->rx_tcp_checksum_errs +
		port_stats->rx_ip_checksum_errs +
		port_stats->rx_udp_checksum_errs;

	/* no space in linux buffers: best possible approximation */
	dev_stats->rx_dropped =
		erx_stats->rx_drops_no_fragments[adapter->rx_obj.q.id];

	/* detailed rx errors */
	dev_stats->rx_length_errors = port_stats->rx_in_range_errors +
		port_stats->rx_out_range_errors +
		port_stats->rx_frame_too_long;

	/* receive ring buffer overflow */
	dev_stats->rx_over_errors = 0;

	dev_stats->rx_crc_errors = port_stats->rx_crc_errors;

	/* frame alignment errors */
	dev_stats->rx_frame_errors = port_stats->rx_alignment_symbol_errors;

	/* receiver fifo overrun */
	/* drops_no_pbuf is not per i/f, it's per BE card */
	dev_stats->rx_fifo_errors = port_stats->rx_fifo_overflow +
					port_stats->rx_input_fifo_overflow +
					rxf_stats->rx_drops_no_pbuf;
	/* receiver missed packets */
	dev_stats->rx_missed_errors = 0;

	/* packet transmit problems */
	dev_stats->tx_errors = 0;

	/* no space available in linux */
	dev_stats->tx_dropped = 0;

	dev_stats->multicast = port_stats->rx_multicast_frames;
	dev_stats->collisions = 0;

	/* detailed tx_errors */
	dev_stats->tx_aborted_errors = 0;
	dev_stats->tx_carrier_errors = 0;
	dev_stats->tx_fifo_errors = 0;
	dev_stats->tx_heartbeat_errors = 0;
	dev_stats->tx_window_errors = 0;
}

void be_link_status_update(struct be_adapter *adapter, bool link_up)
{
	struct net_device *netdev = adapter->netdev;

	/* If link came up or went down */
	if (adapter->link_up != link_up) {
		if (link_up) {
			netif_start_queue(netdev);
			netif_carrier_on(netdev);
			printk(KERN_INFO "%s: Link up\n", netdev->name);
		} else {
			netif_stop_queue(netdev);
			netif_carrier_off(netdev);
			printk(KERN_INFO "%s: Link down\n", netdev->name);
		}
		adapter->link_up = link_up;
	}
}

/* Update the EQ delay in BE based on the RX frags consumed / sec */
static void be_rx_eqd_update(struct be_adapter *adapter)
{
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_drvr_stats *stats = &adapter->stats.drvr_stats;
	ulong now = jiffies;
	u32 eqd;

	if (!rx_eq->enable_aic)
		return;

	/* Wrapped around */
	if (time_before(now, stats->rx_fps_jiffies)) {
		stats->rx_fps_jiffies = now;
		return;
	}

	/* Update once a second */
	if ((now - stats->rx_fps_jiffies) < HZ)
		return;

	stats->be_rx_fps = (stats->be_rx_frags - stats->be_prev_rx_frags) /
			((now - stats->rx_fps_jiffies) / HZ);

	stats->rx_fps_jiffies = now;
	stats->be_prev_rx_frags = stats->be_rx_frags;
	eqd = stats->be_rx_fps / 110000;
	eqd = eqd << 3;
	if (eqd > rx_eq->max_eqd)
		eqd = rx_eq->max_eqd;
	if (eqd < rx_eq->min_eqd)
		eqd = rx_eq->min_eqd;
	if (eqd < 10)
		eqd = 0;
	if (eqd != rx_eq->cur_eqd)
		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);

	rx_eq->cur_eqd = eqd;
}

static struct net_device_stats *be_get_stats(struct net_device *dev)
{
	return &dev->stats;
}

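/* Convert a byte count accumulated over 'ticks' jiffies into Mbits/sec */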
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
	u64 rate = bytes;

	do_div(rate, ticks / HZ);
	rate <<= 3;			/* bytes/sec -> bits/sec */
	do_div(rate, 1000000ul);	/* bits/sec -> Mbits/sec */

	return rate;
}

static void be_tx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around? */
	if (time_before(now, stats->be_tx_jiffies)) {
		stats->be_tx_jiffies = now;
		return;
	}

	/* Update tx rate once in two seconds */
	if ((now - stats->be_tx_jiffies) > 2 * HZ) {
		stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
						  - stats->be_tx_bytes_prev,
						 now - stats->be_tx_jiffies);
		stats->be_tx_jiffies = now;
		stats->be_tx_bytes_prev = stats->be_tx_bytes;
	}
}

static void be_tx_stats_update(struct be_adapter *adapter,
			u32 wrb_cnt, u32 copied, bool stopped)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	stats->be_tx_reqs++;
	stats->be_tx_wrbs += wrb_cnt;
	stats->be_tx_bytes += copied;
	if (stopped)
		stats->be_tx_stops++;
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
{
	int cnt = (skb->len > skb->data_len);

	cnt += skb_shinfo(skb)->nr_frags;

	/* to account for hdr wrb */
	cnt++;
	if (cnt & 1) {
		/* add a dummy to make it an even num */
		cnt++;
		*dummy = true;
	} else
		*dummy = false;
	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
	return cnt;
}

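/* Fill a WRB fragment descriptor: the 64-bit DMA address is split into
 * hi/lo words, followed by the fragment length.
 */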
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
	wrb->frag_pa_hi = upper_32_bits(addr);
	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
		bool vlan, u32 wrb_cnt, u32 len)
{
	memset(hdr, 0, sizeof(*hdr));

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

	if (skb_shinfo(skb)->gso_segs > 1 && skb_shinfo(skb)->gso_size) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
			hdr, skb_shinfo(skb)->gso_size);
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (is_tcp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
		else if (is_udp_pkt(skb))
			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
	}

	if (vlan && vlan_tx_tag_present(skb)) {
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag,
			hdr, vlan_tx_tag_get(skb));
	}

	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static int make_tx_wrbs(struct be_adapter *adapter,
		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
	u64 busaddr;
	u32 i, copied = 0;
	struct pci_dev *pdev = adapter->pdev;
	struct sk_buff *first_skb = skb;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_wrb *wrb;
	struct be_eth_hdr_wrb *hdr;

	hdr = queue_head_node(txq);
	atomic_add(wrb_cnt, &txq->used);
	queue_head_inc(txq);

	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_err(&pdev->dev, "TX DMA mapping failed\n");
		return 0;
	}

	if (skb->len > skb->data_len) {
		int len = skb->len - skb->data_len;
		wrb = queue_head_node(txq);
		busaddr = skb_shinfo(skb)->dma_head;
		wrb_fill(wrb, busaddr, len);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		struct skb_frag_struct *frag =
			&skb_shinfo(skb)->frags[i];

		busaddr = skb_shinfo(skb)->dma_maps[i];
		wrb = queue_head_node(txq);
		wrb_fill(wrb, busaddr, frag->size);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
		copied += frag->size;
	}

	if (dummy_wrb) {
		wrb = queue_head_node(txq);
		wrb_fill(wrb, 0, 0);
		be_dws_cpu_to_le(wrb, sizeof(*wrb));
		queue_head_inc(txq);
	}

	wrb_fill_hdr(hdr, first_skb, adapter->vlan_grp ? true : false,
		wrb_cnt, copied);
	be_dws_cpu_to_le(hdr, sizeof(*hdr));

	return copied;
}

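/* ndo_start_xmit handler: maps the skb, builds its WRBs, stops the queue
 * if the ring may not fit the next skb, then rings the TX doorbell.
 */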
static netdev_tx_t be_xmit(struct sk_buff *skb,
			struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_tx_obj *tx_obj = &adapter->tx_obj;
	struct be_queue_info *txq = &tx_obj->q;
	u32 wrb_cnt = 0, copied = 0;
	u32 start = txq->head;
	bool dummy_wrb, stopped = false;

	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);

	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
	if (copied) {
		/* record the sent skb in the sent_skb table */
		BUG_ON(tx_obj->sent_skb_list[start]);
		tx_obj->sent_skb_list[start] = skb;

		/* Ensure txq has space for the next skb; Else stop the queue
		 * *BEFORE* ringing the tx doorbell, so that we serialize the
		 * tx compls of the current transmit which'll wake up the queue
		 */
		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
								txq->len) {
			netif_stop_queue(netdev);
			stopped = true;
		}

		be_txq_notify(adapter, txq->id, wrb_cnt);

		be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
	} else {
		txq->head = start;
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	if (new_mtu < BE_MIN_MTU ||
			new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
		dev_info(&adapter->pdev->dev,
			"MTU must be between %d and %d bytes\n",
			BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
		return -EINVAL;
	}
	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
			netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;
	return 0;
}

/*
 * If BE_NUM_VLANS_SUPPORTED or fewer VLANs are configured,
 * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
 * set the BE in promiscuous VLAN mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
	u16 vtag[BE_NUM_VLANS_SUPPORTED];
	u16 ntags = 0, i;
	int status;

	if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
		/* Construct VLAN Table to give to HW */
		for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
			if (adapter->vlan_tag[i]) {
				vtag[ntags] = cpu_to_le16(i);
				ntags++;
			}
		}
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
				vtag, ntags, 1, 0);
	} else {
		status = be_cmd_vlan_config(adapter, adapter->if_handle,
				NULL, 0, 1, 1);
	}
	return status;
}

static void be_vlan_register(struct net_device *netdev, struct vlan_group *grp)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;

	be_eq_notify(adapter, rx_eq->q.id, false, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, false, false, 0);
	adapter->vlan_grp = grp;
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans++;
	adapter->vlan_tag[vid] = 1;

	be_vid_config(adapter);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	adapter->num_vlans--;
	adapter->vlan_tag[vid] = 0;

	vlan_group_set_device(adapter->vlan_grp, vid, NULL);
	be_vid_config(adapter);
}

static void be_set_multicast_list(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_PROMISC) {
		be_cmd_promiscuous_config(adapter, adapter->port_num, 1);
		adapter->promiscuous = true;
		goto done;
	}

	/* BE was previously in promiscuous mode; disable it */
	if (adapter->promiscuous) {
		adapter->promiscuous = false;
		be_cmd_promiscuous_config(adapter, adapter->port_num, 0);
	}

	/* Enable multicast promisc if num configured exceeds what we support */
	if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
		be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
				&adapter->mc_cmd_mem);
		goto done;
	}

	be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
		netdev->mc_count, &adapter->mc_cmd_mem);
done:
	return;
}

static void be_rx_rate_update(struct be_adapter *adapter)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);
	ulong now = jiffies;

	/* Wrapped around */
	if (time_before(now, stats->be_rx_jiffies)) {
		stats->be_rx_jiffies = now;
		return;
	}

	/* Update the rate once in two seconds */
	if ((now - stats->be_rx_jiffies) < 2 * HZ)
		return;

	stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
					  - stats->be_rx_bytes_prev,
					 now - stats->be_rx_jiffies);
	stats->be_rx_jiffies = now;
	stats->be_rx_bytes_prev = stats->be_rx_bytes;
}

static void be_rx_stats_update(struct be_adapter *adapter,
		u32 pktsize, u16 numfrags)
{
	struct be_drvr_stats *stats = drvr_stats(adapter);

	stats->be_rx_compl++;
	stats->be_rx_frags += numfrags;
	stats->be_rx_bytes += pktsize;
}

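/* Returns true when the stack must verify the checksum itself, i.e. when
 * rx csum offload is disabled or HW did not validate both the IP and the
 * TCP/UDP checksums of the received frame.
 */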
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
{
	u8 l4_cksm, ip_version, ipcksm, tcpf = 0, udpf = 0, ipv6_chk;

	l4_cksm = AMAP_GET_BITS(struct amap_eth_rx_compl, l4_cksm, rxcp);
	ipcksm = AMAP_GET_BITS(struct amap_eth_rx_compl, ipcksm, rxcp);
	ip_version = AMAP_GET_BITS(struct amap_eth_rx_compl, ip_version, rxcp);
	if (ip_version) {
		tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
		udpf = AMAP_GET_BITS(struct amap_eth_rx_compl, udpf, rxcp);
	}
	ipv6_chk = (ip_version && (tcpf || udpf));

	return ((l4_cksm && ipv6_chk && ipcksm) && cso) ? false : true;
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
{
	struct be_rx_page_info *rx_page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;

	rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
	BUG_ON(!rx_page_info->page);

	if (rx_page_info->last_page_user)
		pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
			adapter->big_page_size, PCI_DMA_FROMDEVICE);

	atomic_dec(&rxq->used);
	return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
		struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	for (i = 0; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
		index_inc(&rxq_idx, rxq->len);
	}
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter,
		struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
{
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_rx_page_info *page_info;
	u16 rxq_idx, i, num_rcvd, j;
	u32 pktsize, hdr_len, curr_frag_len, size;
	u8 *start;

	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);

	page_info = get_rx_page_info(adapter, rxq_idx);

	start = page_address(page_info->page) + page_info->page_offset;
	prefetch(start);

	/* Copy data in the first descriptor of this completion */
	curr_frag_len = min(pktsize, rx_frag_size);

	/* Copy the header portion into skb_data */
	hdr_len = min((u32)BE_HDR_LEN, curr_frag_len);
	memcpy(skb->data, start, hdr_len);
	skb->len = curr_frag_len;
	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
		/* Complete packet has now been moved to data */
		put_page(page_info->page);
		skb->data_len = 0;
		skb->tail += curr_frag_len;
	} else {
		skb_shinfo(skb)->nr_frags = 1;
		skb_shinfo(skb)->frags[0].page = page_info->page;
		skb_shinfo(skb)->frags[0].page_offset =
					page_info->page_offset + hdr_len;
		skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len;
		skb->data_len = curr_frag_len - hdr_len;
		skb->tail += hdr_len;
	}
	memset(page_info, 0, sizeof(*page_info));

	if (pktsize <= rx_frag_size) {
		BUG_ON(num_rcvd != 1);
		goto done;
	}

	/* More frags present for this completion */
	size = pktsize;
	for (i = 1, j = 0; i < num_rcvd; i++) {
		size -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(size, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (page_info->page_offset == 0) {
			/* Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
			skb_shinfo(skb)->nr_frags++;
		} else {
			put_page(page_info->page);
		}

		skb_shinfo(skb)->frags[j].size += curr_frag_len;
		skb->len += curr_frag_len;
		skb->data_len += curr_frag_len;

		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

done:
	be_rx_stats_update(adapter, pktsize, num_rcvd);
	return;
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct sk_buff *skb;
	u32 vlanf, vid;
	u8 vtm;

	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap == 0x400) && !vtm)
		vlanf = 0;

	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
	if (!skb) {
		if (net_ratelimit())
			dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	skb_fill_rx_data(adapter, skb, rxcp);

	if (do_pkt_csum(rxcp, adapter->rx_csum))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	skb->truesize = skb->len + sizeof(struct sk_buff);
	skb->protocol = eth_type_trans(skb, adapter->netdev);
	skb->dev = adapter->netdev;

	if (vlanf) {
		if (!adapter->vlan_grp || adapter->num_vlans == 0) {
			kfree_skb(skb);
			return;
		}
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
	} else {
		netif_receive_skb(skb);
	}

	return;
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	struct be_rx_page_info *page_info;
	struct sk_buff *skb = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_eq_obj *eq_obj = &adapter->rx_eq;
	u32 num_rcvd, pkt_size, remaining, vlanf, curr_frag_len;
	u16 i, rxq_idx = 0, vid, j;
	u8 vtm;

	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
	vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);

	/* vlanf could be wrongly set in some cards.
	 * ignore if vtm is not set */
	if ((adapter->cap == 0x400) && !vtm)
		vlanf = 0;

	skb = napi_get_frags(&eq_obj->napi);
	if (!skb) {
		be_rx_compl_discard(adapter, rxcp);
		return;
	}

	remaining = pkt_size;
	for (i = 0, j = -1; i < num_rcvd; i++) {
		page_info = get_rx_page_info(adapter, rxq_idx);

		curr_frag_len = min(remaining, rx_frag_size);

		/* Coalesce all frags from the same physical page in one slot */
		if (i == 0 || page_info->page_offset == 0) {
			/* First frag or Fresh page */
			j++;
			skb_shinfo(skb)->frags[j].page = page_info->page;
			skb_shinfo(skb)->frags[j].page_offset =
							page_info->page_offset;
			skb_shinfo(skb)->frags[j].size = 0;
		} else {
			put_page(page_info->page);
		}
		skb_shinfo(skb)->frags[j].size += curr_frag_len;

		remaining -= curr_frag_len;
		index_inc(&rxq_idx, rxq->len);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(j > MAX_SKB_FRAGS);

	skb_shinfo(skb)->nr_frags = j + 1;
	skb->len = pkt_size;
	skb->data_len = pkt_size;
	skb->truesize += pkt_size;
	skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (likely(!vlanf)) {
		napi_gro_frags(&eq_obj->napi);
	} else {
		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
		vid = be16_to_cpu(vid);

		if (!adapter->vlan_grp || adapter->num_vlans == 0)
			return;

		vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
	}

	be_rx_stats_update(adapter, pkt_size, num_rcvd);
	return;
}

static struct be_eth_rx_compl *be_rx_compl_get(struct be_adapter *adapter)
{
	struct be_eth_rx_compl *rxcp = queue_tail_node(&adapter->rx_obj.cq);

	if (rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(rxcp, sizeof(*rxcp));

	queue_tail_inc(&adapter->rx_obj.cq);
	return rxcp;
}

/* To reset the valid bit, we need to reset the whole word as
 * when walking the queue the valid entries are little-endian
 * and invalid entries are host endian
 */
static inline void be_rx_compl_reset(struct be_eth_rx_compl *rxcp)
{
	rxcp->dw[offsetof(struct amap_eth_rx_compl, valid) / 32] = 0;
}

static inline struct page *be_alloc_pages(u32 size)
{
	gfp_t alloc_flags = GFP_ATOMIC;
	u32 order = get_order(size);
	if (order > 0)
		alloc_flags |= __GFP_COMP;
	return alloc_pages(alloc_flags, order);
}

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info_tbl = adapter->rx_obj.page_info_tbl;
	struct be_rx_page_info *page_info = NULL;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct page *pagep = NULL;
	struct be_eth_rx_d *rxd;
	u64 page_dmaaddr = 0, frag_dmaaddr;
	u32 posted, page_offset = 0;

	page_info = &page_info_tbl[rxq->head];
	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
		if (!pagep) {
			pagep = be_alloc_pages(adapter->big_page_size);
			if (unlikely(!pagep)) {
				drvr_stats(adapter)->be_ethrx_post_fail++;
				break;
			}
			page_dmaaddr = pci_map_page(adapter->pdev, pagep, 0,
						adapter->big_page_size,
						PCI_DMA_FROMDEVICE);
			page_info->page_offset = 0;
		} else {
			get_page(pagep);
			page_info->page_offset = page_offset + rx_frag_size;
		}
		page_offset = page_info->page_offset;
		page_info->page = pagep;
		pci_unmap_addr_set(page_info, bus, page_dmaaddr);
		frag_dmaaddr = page_dmaaddr + page_info->page_offset;

		rxd = queue_head_node(rxq);
		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
		queue_head_inc(rxq);

		/* Any space left in the current big page for another frag? */
		if ((page_offset + rx_frag_size + rx_frag_size) >
					adapter->big_page_size) {
			pagep = NULL;
			page_info->last_page_user = true;
		}
		page_info = &page_info_tbl[rxq->head];
	}
	if (pagep)
		page_info->last_page_user = true;

	if (posted) {
		atomic_add(posted, &rxq->used);
		be_rxq_notify(adapter, rxq->id, posted);
	} else if (atomic_read(&rxq->used) == 0) {
		/* Let be_worker replenish when memory is available */
		adapter->rx_post_starved = true;
	}

	return;
}

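/* Pop the next TX completion if its valid bit is set; the entry is
 * converted to CPU endianness and the valid bit cleared in place.
 */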
static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
{
	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);

	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
		return NULL;

	be_dws_le_to_cpu(txcp, sizeof(*txcp));

	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;

	queue_tail_inc(tx_cq);
	return txcp;
}

static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
	struct sk_buff *sent_skb;
	u16 cur_index, num_wrbs = 0;

	cur_index = txq->tail;
	sent_skb = sent_skbs[cur_index];
	BUG_ON(!sent_skb);
	sent_skbs[cur_index] = NULL;

	do {
		cur_index = txq->tail;
		num_wrbs++;
		queue_tail_inc(txq);
	} while (cur_index != last_index);

	atomic_sub(num_wrbs, &txq->used);
	skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
	kfree_skb(sent_skb);
}

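/* Pop the next event queue entry if one is pending (evt != 0) */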
static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);

	if (!eqe->evt)
		return NULL;

	eqe->evt = le32_to_cpu(eqe->evt);
	queue_tail_inc(&eq_obj->q);
	return eqe;
}

static int event_handle(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	/* Deal with any spurious interrupts that come
	 * without events
	 */
	be_eq_notify(adapter, eq_obj->q.id, true, true, num);
	if (num)
		napi_schedule(&eq_obj->napi);

	return num;
}

/* Just read and notify events without processing them.
 * Used at the time of destroying event queues */
static void be_eq_clean(struct be_adapter *adapter,
			struct be_eq_obj *eq_obj)
{
	struct be_eq_entry *eqe;
	u16 num = 0;

	while ((eqe = event_get(eq_obj)) != NULL) {
		eqe->evt = 0;
		num++;
	}

	if (num)
		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
}

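/* Discard any pending RX completions, then free the posted RX buffers
 * that were never consumed; used while tearing down the RX queue.
 */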
static void be_rx_q_clean(struct be_adapter *adapter)
{
	struct be_rx_page_info *page_info;
	struct be_queue_info *rxq = &adapter->rx_obj.q;
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u16 tail;

	/* First cleanup pending rx completions */
	while ((rxcp = be_rx_compl_get(adapter)) != NULL) {
		be_rx_compl_discard(adapter, rxcp);
		be_rx_compl_reset(rxcp);
		be_cq_notify(adapter, rx_cq->id, true, 1);
	}

	/* Then free posted rx buffers that were not used */
	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
		page_info = get_rx_page_info(adapter, tail);
		put_page(page_info->page);
		memset(page_info, 0, sizeof(*page_info));
	}
	BUG_ON(atomic_read(&rxq->used));
}

static void be_tx_compl_clean(struct be_adapter *adapter)
{
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_eth_tx_compl *txcp;
	u16 end_idx, cmpl = 0, timeo = 0;

	/* Wait for a max of 200ms for all the tx-completions to arrive. */
	do {
		while ((txcp = be_tx_compl_get(tx_cq))) {
			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
					wrb_index, txcp);
			be_tx_compl_process(adapter, end_idx);
			cmpl++;
		}
		if (cmpl) {
			be_cq_notify(adapter, tx_cq->id, false, cmpl);
			cmpl = 0;
		}

		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
			break;

		mdelay(1);
	} while (true);

	if (atomic_read(&txq->used))
		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
			atomic_read(&txq->used));
}

static void be_mcc_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->mcc_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
	be_queue_free(adapter, q);

	q = &adapter->mcc_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);
}

/* Must be called only after TX qs are created as MCC shares TX EQ */
static int be_mcc_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *q, *cq;

	/* Alloc MCC compl queue */
	cq = &adapter->mcc_obj.cq;
	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;

	/* Ask BE to create MCC compl queue; share TX's eq */
	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
		goto mcc_cq_free;

	/* Alloc MCC queue */
	q = &adapter->mcc_obj.q;
	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (be_cmd_mccq_create(adapter, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(adapter, q);
mcc_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(adapter, cq);
err:
	return -1;
}

static void be_tx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->tx_obj.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
	be_queue_free(adapter, q);

	q = &adapter->tx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->tx_eq);

	q = &adapter->tx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

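/* Create the TX EQ, CQ and ethernet queue in that order; on any failure
 * the previously created/allocated objects are unwound via the error
 * labels below.
 */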
static int be_tx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;

	adapter->tx_eq.max_eqd = 0;
	adapter->tx_eq.min_eqd = 0;
	adapter->tx_eq.cur_eqd = 96;
	adapter->tx_eq.enable_aic = false;
	/* Alloc Tx Event queue */
	eq = &adapter->tx_eq.q;
	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN, sizeof(struct be_eq_entry)))
		return -1;

	/* Ask BE to create Tx Event queue */
	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
		goto tx_eq_free;
	/* Alloc TX eth compl queue */
	cq = &adapter->tx_obj.cq;
	if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
			sizeof(struct be_eth_tx_compl)))
		goto tx_eq_destroy;

	/* Ask BE to create Tx eth compl queue */
	if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
		goto tx_cq_free;

	/* Alloc TX eth queue */
	q = &adapter->tx_obj.q;
	if (be_queue_alloc(adapter, q, TX_Q_LEN, sizeof(struct be_eth_wrb)))
		goto tx_cq_destroy;

	/* Ask BE to create Tx eth queue */
	if (be_cmd_txq_create(adapter, q, cq))
		goto tx_q_free;
	return 0;

tx_q_free:
	be_queue_free(adapter, q);
tx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
tx_cq_free:
	be_queue_free(adapter, cq);
tx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
tx_eq_free:
	be_queue_free(adapter, eq);
	return -1;
}

static void be_rx_queues_destroy(struct be_adapter *adapter)
{
	struct be_queue_info *q;

	q = &adapter->rx_obj.q;
	if (q->created) {
		be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
		be_rx_q_clean(adapter);
	}
	be_queue_free(adapter, q);

	q = &adapter->rx_obj.cq;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
	be_queue_free(adapter, q);

	/* Clear any residual events */
	be_eq_clean(adapter, &adapter->rx_eq);

	q = &adapter->rx_eq.q;
	if (q->created)
		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
	be_queue_free(adapter, q);
}

static int be_rx_queues_create(struct be_adapter *adapter)
{
	struct be_queue_info *eq, *q, *cq;
	int rc;

	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
	adapter->rx_eq.max_eqd = BE_MAX_EQD;
	adapter->rx_eq.min_eqd = 0;
	adapter->rx_eq.cur_eqd = 0;
	adapter->rx_eq.enable_aic = true;

	/* Alloc Rx Event queue */
	eq = &adapter->rx_eq.q;
	rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
				sizeof(struct be_eq_entry));
	if (rc)
		return rc;

	/* Ask BE to create Rx Event queue */
	rc = be_cmd_eq_create(adapter, eq, adapter->rx_eq.cur_eqd);
	if (rc)
		goto rx_eq_free;

	/* Alloc RX eth compl queue */
	cq = &adapter->rx_obj.cq;
	rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
			sizeof(struct be_eth_rx_compl));
	if (rc)
		goto rx_eq_destroy;

	/* Ask BE to create Rx eth compl queue */
	rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
	if (rc)
		goto rx_cq_free;

	/* Alloc RX eth queue */
	q = &adapter->rx_obj.q;
	rc = be_queue_alloc(adapter, q, RX_Q_LEN, sizeof(struct be_eth_rx_d));
	if (rc)
		goto rx_cq_destroy;

	/* Ask BE to create Rx eth queue */
	rc = be_cmd_rxq_create(adapter, q, cq->id, rx_frag_size,
		BE_MAX_JUMBO_FRAME_SIZE, adapter->if_handle, false);
	if (rc)
		goto rx_q_free;

	return 0;
rx_q_free:
	be_queue_free(adapter, q);
rx_cq_destroy:
	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
rx_cq_free:
	be_queue_free(adapter, cq);
rx_eq_destroy:
	be_cmd_q_destroy(adapter, eq, QTYPE_EQ);
rx_eq_free:
	be_queue_free(adapter, eq);
	return rc;
}

/* There are 8 evt ids per func. Returns the evt id's bit number */
static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
{
	return eq_id - 8 * be_pci_func(adapter);
}

static irqreturn_t be_intx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;
	int isr;

	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
		be_pci_func(adapter) * CEV_ISR_SIZE);
	if (!isr)
		return IRQ_NONE;

	event_handle(adapter, &adapter->tx_eq);
	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_rx(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->rx_eq);

	return IRQ_HANDLED;
}

static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
{
	struct be_adapter *adapter = dev;

	event_handle(adapter, &adapter->tx_eq);

	return IRQ_HANDLED;
}

static inline bool do_gro(struct be_adapter *adapter,
			struct be_eth_rx_compl *rxcp)
{
	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);

	if (err)
		drvr_stats(adapter)->be_rxcp_err++;

	return (tcp_frame && !err) ? true : false;
}

int be_poll_rx(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(rx_eq, struct be_adapter, rx_eq);
	struct be_queue_info *rx_cq = &adapter->rx_obj.cq;
	struct be_eth_rx_compl *rxcp;
	u32 work_done;

	for (work_done = 0; work_done < budget; work_done++) {
		rxcp = be_rx_compl_get(adapter);
		if (!rxcp)
			break;

		if (do_gro(adapter, rxcp))
			be_rx_compl_process_gro(adapter, rxcp);
		else
			be_rx_compl_process(adapter, rxcp);

		be_rx_compl_reset(rxcp);
	}

	/* Refill the queue */
	if (atomic_read(&adapter->rx_obj.q.used) < RX_FRAGS_REFILL_WM)
		be_post_rx_frags(adapter);

	/* All consumed */
	if (work_done < budget) {
		napi_complete(napi);
		be_cq_notify(adapter, rx_cq->id, true, work_done);
	} else {
		/* More to be consumed; continue with interrupts disabled */
		be_cq_notify(adapter, rx_cq->id, false, work_done);
	}
	return work_done;
}

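/* Reap TX completions, free the wrbs/skbs they cover and wake the
 * netdev queue once at least half of the TX ring is free again.
 */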
void be_process_tx(struct be_adapter *adapter)
{
	struct be_queue_info *txq = &adapter->tx_obj.q;
	struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
	struct be_eth_tx_compl *txcp;
	u32 num_cmpl = 0;
	u16 end_idx;

	while ((txcp = be_tx_compl_get(tx_cq))) {
		end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
			wrb_index, txcp);
		be_tx_compl_process(adapter, end_idx);
		num_cmpl++;
	}

	if (num_cmpl) {
		be_cq_notify(adapter, tx_cq->id, true, num_cmpl);

		/* As Tx wrbs have been freed up, wake up netdev queue if
		 * it was stopped due to lack of tx wrbs.
		 */
		if (netif_queue_stopped(adapter->netdev) &&
			atomic_read(&txq->used) < txq->len / 2) {
			netif_wake_queue(adapter->netdev);
		}

		drvr_stats(adapter)->be_tx_events++;
		drvr_stats(adapter)->be_tx_compl += num_cmpl;
	}
}

/* As TX and MCC share the same EQ, check for both TX and MCC completions.
 * For TX/MCC we don't honour budget; consume everything.
 */
static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
	struct be_adapter *adapter =
		container_of(tx_eq, struct be_adapter, tx_eq);

	napi_complete(napi);

	be_process_tx(adapter);

	be_process_mcc(adapter);

	return 1;
}

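/* Periodic housekeeping, re-armed every second: refresh stats from
 * firmware, adapt the RX EQ delay and rate counters to the current
 * load, and re-post RX buffers if posting had previously starved.
 */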
static void be_worker(struct work_struct *work)
{
	struct be_adapter *adapter =
		container_of(work, struct be_adapter, work.work);

	be_cmd_get_stats(adapter, &adapter->stats.cmd);

	/* Set EQ delay */
	be_rx_eqd_update(adapter);

	be_tx_rate_update(adapter);
	be_rx_rate_update(adapter);

	if (adapter->rx_post_starved) {
		adapter->rx_post_starved = false;
		be_post_rx_frags(adapter);
	}

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

static void be_msix_disable(struct be_adapter *adapter)
{
	if (adapter->msix_enabled) {
		pci_disable_msix(adapter->pdev);
		adapter->msix_enabled = false;
	}
}

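/* Best-effort MSI-X enable; on failure msix_enabled stays false and
 * IRQ registration later falls back to INTx.
 */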
static void be_msix_enable(struct be_adapter *adapter)
{
	int i, status;

	for (i = 0; i < BE_NUM_MSIX_VECTORS; i++)
		adapter->msix_entries[i].entry = i;

	status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
		BE_NUM_MSIX_VECTORS);
	if (status == 0)
		adapter->msix_enabled = true;
}

static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
{
	return adapter->msix_entries[
		be_evt_bit_get(adapter, eq_id)].vector;
}

static int be_request_irq(struct be_adapter *adapter,
		struct be_eq_obj *eq_obj,
		void *handler, char *desc)
{
	struct net_device *netdev = adapter->netdev;
	int vec;

	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
	vec = be_msix_vec_get(adapter, eq_obj->q.id);
	return request_irq(vec, handler, 0, eq_obj->desc, adapter);
}

static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj)
{
	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
	free_irq(vec, adapter);
}

static int be_msix_register(struct be_adapter *adapter)
{
	int status;

	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx");
	if (status)
		goto err;

	status = be_request_irq(adapter, &adapter->rx_eq, be_msix_rx, "rx");
	if (status)
		goto free_tx_irq;

	return 0;

free_tx_irq:
	be_free_irq(adapter, &adapter->tx_eq);
err:
	dev_warn(&adapter->pdev->dev,
		"MSIX Request IRQ failed - err %d\n", status);
	pci_disable_msix(adapter->pdev);
	adapter->msix_enabled = false;
	return status;
}

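/* Register interrupt handlers: MSI-X first (one vector for TX/MCC and
 * one for RX), falling back to a shared INTx line if that fails.
 */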
static int be_irq_register(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int status;

	if (adapter->msix_enabled) {
		status = be_msix_register(adapter);
		if (status == 0)
			goto done;
	}

	/* INTx */
	netdev->irq = adapter->pdev->irq;
	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
			adapter);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"INTx request IRQ failed - err %d\n", status);
		return status;
	}
done:
	adapter->isr_registered = true;
	return 0;
}

static void be_irq_unregister(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (!adapter->isr_registered)
		return;

	/* INTx */
	if (!adapter->msix_enabled) {
		free_irq(netdev->irq, adapter);
		goto done;
	}

	/* MSIx */
	be_free_irq(adapter, &adapter->tx_eq);
	be_free_irq(adapter, &adapter->rx_eq);
done:
	adapter->isr_registered = false;
}

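/* ndo_open: post the initial RX buffers, enable NAPI and interrupts,
 * arm the event/completion queues, then sync link state, VLAN config
 * and flow control with firmware before kicking off the worker.
 */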
static int be_open(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	bool link_up;
	int status;
	u8 mac_speed;
	u16 link_speed;

	/* First time posting */
	be_post_rx_frags(adapter);

	napi_enable(&rx_eq->napi);
	napi_enable(&tx_eq->napi);

	be_irq_register(adapter);

	be_intr_set(adapter, true);

	/* The evt queues are created in unarmed state; arm them */
	be_eq_notify(adapter, rx_eq->q.id, true, false, 0);
	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);

	/* Rx compl queue may be in unarmed state; rearm it */
	be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);

	status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
			&link_speed);
	if (status)
		goto ret_sts;
	be_link_status_update(adapter, link_up);

	status = be_vid_config(adapter);
	if (status)
		goto ret_sts;

	status = be_cmd_set_flow_control(adapter,
		adapter->tx_fc, adapter->rx_fc);
	if (status)
		goto ret_sts;

	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
ret_sts:
	return status;
}

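/* Create the firmware-side interface and the TX, RX and MCC queue
 * sets; each step unwinds the earlier ones on failure.
 */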
static int be_setup(struct be_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	u32 cap_flags, en_flags;
	int status;

	cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_MCAST_PROMISCUOUS |
			BE_IF_FLAGS_PROMISCUOUS |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;
	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
			BE_IF_FLAGS_PASS_L3L4_ERRORS;

	status = be_cmd_if_create(adapter, cap_flags, en_flags,
			netdev->dev_addr, false/* pmac_invalid */,
			&adapter->if_handle, &adapter->pmac_id);
	if (status != 0)
		goto do_none;

	status = be_tx_queues_create(adapter);
	if (status != 0)
		goto if_destroy;

	status = be_rx_queues_create(adapter);
	if (status != 0)
		goto tx_qs_destroy;

	status = be_mcc_queues_create(adapter);
	if (status != 0)
		goto rx_qs_destroy;

	return 0;

rx_qs_destroy:
	be_rx_queues_destroy(adapter);
tx_qs_destroy:
	be_tx_queues_destroy(adapter);
if_destroy:
	be_cmd_if_destroy(adapter, adapter->if_handle);
do_none:
	return status;
}

static int be_clear(struct be_adapter *adapter)
{
	be_mcc_queues_destroy(adapter);
	be_rx_queues_destroy(adapter);
	be_tx_queues_destroy(adapter);

	be_cmd_if_destroy(adapter, adapter->if_handle);

	/* tell fw we're done with firing cmds */
	be_cmd_fw_clean(adapter);
	return 0;
}

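/* ndo_close: stop the worker and the netdev queue, quiesce interrupts
 * and NAPI, then drain pending TX completions so all skbs are freed.
 */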
static int be_close(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);
	struct be_eq_obj *rx_eq = &adapter->rx_eq;
	struct be_eq_obj *tx_eq = &adapter->tx_eq;
	int vec;

	cancel_delayed_work_sync(&adapter->work);

	netif_stop_queue(netdev);
	netif_carrier_off(netdev);
	adapter->link_up = false;

	be_intr_set(adapter, false);

	if (adapter->msix_enabled) {
		vec = be_msix_vec_get(adapter, tx_eq->q.id);
		synchronize_irq(vec);
		vec = be_msix_vec_get(adapter, rx_eq->q.id);
		synchronize_irq(vec);
	} else {
		synchronize_irq(netdev->irq);
	}
	be_irq_unregister(adapter);

	napi_disable(&rx_eq->napi);
	napi_disable(&tx_eq->napi);

	/* Wait for all pending tx completions to arrive so that
	 * all tx skbs are freed.
	 */
	be_tx_compl_clean(adapter);

	return 0;
}

#define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
char flash_cookie[2][16] = {"*** SE FLAS",
				"H DIRECTORY *** "};

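/* Decide whether the redboot image needs flashing: skip it when the
 * CRC in the firmware file matches the CRC already programmed in flash.
 */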
static bool be_flash_redboot(struct be_adapter *adapter,
			const u8 *p)
{
	u32 crc_offset;
	u8 flashed_crc[4];
	int status;

	crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
		+ sizeof(struct flash_file_hdr) - 32*1024;
	p += crc_offset;
	status = be_cmd_get_flash_crc(adapter, flashed_crc);
	if (status) {
		dev_err(&adapter->pdev->dev,
			"could not get crc from flash, not flashing redboot\n");
		return false;
	}

	/* update redboot only if crc does not match */
	return memcmp(flashed_crc, p, 4) != 0;
}

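/* Flash one image from the firmware file to its region in flash. Data
 * is fed to the card in 32KB chunks: intermediate chunks use
 * FLASHROM_OPER_SAVE and the final chunk FLASHROM_OPER_FLASH to commit.
 */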
static int be_flash_image(struct be_adapter *adapter,
			const struct firmware *fw,
			struct be_dma_mem *flash_cmd, u32 flash_type)
{
	int status;
	u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
	int num_bytes;
	const u8 *p = fw->data;
	struct be_cmd_write_flashrom *req = flash_cmd->va;

	switch (flash_type) {
	case FLASHROM_TYPE_ISCSI_ACTIVE:
		image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_ISCSI_BACKUP:
		image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_FW_ACTIVE:
		image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_FW_BACKUP:
		image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
		image_size = FLASH_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_BIOS:
		image_offset = FLASH_iSCSI_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_FCOE_BIOS:
		image_offset = FLASH_FCoE_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_PXE_BIOS:
		image_offset = FLASH_PXE_BIOS_START;
		image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
		break;
	case FLASHROM_TYPE_REDBOOT:
		if (!be_flash_redboot(adapter, fw->data))
			return 0;
		image_offset = FLASH_REDBOOT_ISM_START;
		image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
		break;
	default:
		return 0;
	}

	p += sizeof(struct flash_file_hdr) + image_offset;
	if (p + image_size > fw->data + fw->size)
		return -1;

	total_bytes = image_size;

	while (total_bytes) {
		if (total_bytes > 32*1024)
			num_bytes = 32*1024;
		else
			num_bytes = total_bytes;
		total_bytes -= num_bytes;

		if (!total_bytes)
			flash_op = FLASHROM_OPER_FLASH;
		else
			flash_op = FLASHROM_OPER_SAVE;
		memcpy(req->params.data_buf, p, num_bytes);
		p += num_bytes;
		status = be_cmd_write_flashrom(adapter, flash_cmd,
				flash_type, flash_op, num_bytes);
		if (status) {
			dev_err(&adapter->pdev->dev,
				"cmd to write to flash rom failed. type/op %d/%d\n",
				flash_type, flash_op);
			return -1;
		}
		yield();
	}

	return 0;
}

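/* Fetch a firmware file via request_firmware(), verify its signature
 * and flash-directory cookie, and flash each supported image type.
 */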
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
	char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
	const struct firmware *fw;
	struct flash_file_hdr *fhdr;
	struct flash_section_info *fsec = NULL;
	struct be_dma_mem flash_cmd;
	int status;
	const u8 *p;
	bool entry_found = false;
	int flash_type;
	char fw_ver[FW_VER_LEN];
	char fw_cfg;

	status = be_cmd_get_fw_ver(adapter, fw_ver);
	if (status)
		return status;

	fw_cfg = *(fw_ver + 2);
	if (fw_cfg == '0')
		fw_cfg = '1';
	strcpy(fw_file, func);

	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
	if (status)
		goto fw_exit;

	p = fw->data;
	fhdr = (struct flash_file_hdr *) p;
	if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
		dev_err(&adapter->pdev->dev,
			"Firmware(%s) load error (signature did not match)\n",
			fw_file);
		status = -1;
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

	p += sizeof(struct flash_file_hdr);
	while (p < (fw->data + fw->size)) {
		fsec = (struct flash_section_info *)p;
		if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
			entry_found = true;
			break;
		}
		p += 32;
	}

	if (!entry_found) {
		status = -1;
		dev_err(&adapter->pdev->dev,
			"Flash cookie not found in firmware image\n");
		goto fw_exit;
	}

	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
	flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
					&flash_cmd.dma);
	if (!flash_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
			"Memory allocation failure while flashing\n");
		goto fw_exit;
	}

	for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
		flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
		status = be_flash_image(adapter, fw, &flash_cmd,
				flash_type);
		if (status)
			break;
	}

	pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
				flash_cmd.dma);
	if (status) {
		dev_err(&adapter->pdev->dev, "Firmware load error\n");
		goto fw_exit;
	}

	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

fw_exit:
	release_firmware(fw);
	return status;
}

static struct net_device_ops be_netdev_ops = {
	.ndo_open		= be_open,
	.ndo_stop		= be_close,
	.ndo_start_xmit		= be_xmit,
	.ndo_get_stats		= be_get_stats,
	.ndo_set_rx_mode	= be_set_multicast_list,
	.ndo_set_mac_address	= be_mac_addr_set,
	.ndo_change_mtu		= be_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_register	= be_vlan_register,
	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
};

static void be_netdev_init(struct net_device *netdev)
{
	struct be_adapter *adapter = netdev_priv(netdev);

	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
		NETIF_F_GRO;

	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;

	netdev->flags |= IFF_MULTICAST;

	adapter->rx_csum = true;

	/* Default settings for Rx and Tx flow control */
	adapter->rx_fc = true;
	adapter->tx_fc = true;

	netif_set_gso_max_size(netdev, 65535);

	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

	netif_napi_add(netdev, &adapter->rx_eq.napi, be_poll_rx,
		BE_NAPI_WEIGHT);
	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
		BE_NAPI_WEIGHT);

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
	if (adapter->csr)
		iounmap(adapter->csr);
	if (adapter->db)
		iounmap(adapter->db);
	if (adapter->pcicfg)
		iounmap(adapter->pcicfg);
}

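/* Map the BARs used by the driver: BAR 2 for the CSR block, the first
 * 128KB of BAR 4 for doorbells, and BAR 1 for PCI config space.
 */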
static int be_map_pci_bars(struct be_adapter *adapter)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
			pci_resource_len(adapter->pdev, 2));
	if (addr == NULL)
		return -ENOMEM;
	adapter->csr = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
			128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	adapter->db = addr;

	addr = ioremap_nocache(pci_resource_start(adapter->pdev, 1),
			pci_resource_len(adapter->pdev, 1));
	if (addr == NULL)
		goto pci_map_err;
	adapter->pcicfg = addr;

	return 0;
pci_map_err:
	be_unmap_pci_bars(adapter);
	return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

	be_unmap_pci_bars(adapter);

	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);

	mem = &adapter->mc_cmd_mem;
	if (mem->va)
		pci_free_consistent(adapter->pdev, mem->size,
			mem->va, mem->dma);
}

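/* Map PCI BARs and allocate control-path DMA memory. The mailbox is
 * over-allocated by 16 bytes so the region handed to hardware can be
 * aligned on a 16-byte boundary.
 */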
static int be_ctrl_init(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
	struct be_dma_mem *mc_cmd_mem = &adapter->mc_cmd_mem;
	int status;

	status = be_map_pci_bars(adapter);
	if (status)
		goto done;

	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(adapter->pdev,
				mbox_mem_alloc->size, &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		status = -ENOMEM;
		goto unmap_pci_bars;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

	mc_cmd_mem->size = sizeof(struct be_cmd_req_mcast_mac_config);
	mc_cmd_mem->va = pci_alloc_consistent(adapter->pdev, mc_cmd_mem->size,
			&mc_cmd_mem->dma);
	if (mc_cmd_mem->va == NULL) {
		status = -ENOMEM;
		goto free_mbox;
	}
	memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);

	spin_lock_init(&adapter->mbox_lock);
	spin_lock_init(&adapter->mcc_lock);
	spin_lock_init(&adapter->mcc_cq_lock);

	return 0;

free_mbox:
	pci_free_consistent(adapter->pdev, mbox_mem_alloc->size,
		mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
	be_unmap_pci_bars(adapter);

done:
	return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	if (cmd->va)
		pci_free_consistent(adapter->pdev, cmd->size,
			cmd->va, cmd->dma);
}

static int be_stats_init(struct be_adapter *adapter)
{
	struct be_stats_obj *stats = &adapter->stats;
	struct be_dma_mem *cmd = &stats->cmd;

	cmd->size = sizeof(struct be_cmd_req_get_stats);
	cmd->va = pci_alloc_consistent(adapter->pdev, cmd->size, &cmd->dma);
	if (cmd->va == NULL)
		return -1;
	return 0;
}

static void __devexit be_remove(struct pci_dev *pdev)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter)
		return;

	unregister_netdev(adapter->netdev);

	be_clear(adapter);

	be_stats_cleanup(adapter);

	be_ctrl_cleanup(adapter);

	be_msix_disable(adapter);

	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

	free_netdev(adapter->netdev);
}

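/* Query the per-function config from firmware: FW version, port
 * number, capabilities and the permanent MAC address, which is
 * validated before being programmed into the netdev.
 */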
static int be_get_config(struct be_adapter *adapter)
{
	int status;
	u8 mac[ETH_ALEN];

	status = be_cmd_get_fw_ver(adapter, adapter->fw_ver);
	if (status)
		return status;

	status = be_cmd_query_fw_cfg(adapter,
			&adapter->port_num, &adapter->cap);
	if (status)
		return status;

	memset(mac, 0, ETH_ALEN);
	status = be_cmd_mac_addr_query(adapter, mac,
		MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
	if (status)
		return status;

	if (!is_valid_ether_addr(mac))
		return -EADDRNOTAVAIL;

	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);

	return 0;
}

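/* PCI probe: enable the device, pick a DMA mask (64-bit with a 32-bit
 * fallback), bring firmware to a ready state and register the netdev.
 * The error labels unwind in reverse order of setup.
 */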
static int __devinit be_probe(struct pci_dev *pdev,
		const struct pci_device_id *pdev_id)
{
	int status = 0;
	struct be_adapter *adapter;
	struct net_device *netdev;

	status = pci_enable_device(pdev);
	if (status)
		goto do_none;

	status = pci_request_regions(pdev, DRV_NAME);
	if (status)
		goto disable_dev;
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct be_adapter));
	if (netdev == NULL) {
		status = -ENOMEM;
		goto rel_reg;
	}
	adapter = netdev_priv(netdev);
	adapter->pdev = pdev;
	pci_set_drvdata(pdev, adapter);
	adapter->netdev = netdev;
	be_netdev_init(netdev);
	SET_NETDEV_DEV(netdev, &pdev->dev);

	be_msix_enable(adapter);

	status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!status) {
		netdev->features |= NETIF_F_HIGHDMA;
	} else {
		status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (status) {
			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
			goto free_netdev;
		}
	}

	status = be_ctrl_init(adapter);
	if (status)
		goto free_netdev;

	/* sync up with fw's ready state */
	status = be_cmd_POST(adapter);
	if (status)
		goto ctrl_clean;

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_cmd_reset_function(adapter);
	if (status)
		goto ctrl_clean;

	status = be_stats_init(adapter);
	if (status)
		goto ctrl_clean;

	status = be_get_config(adapter);
	if (status)
		goto stats_clean;

	INIT_DELAYED_WORK(&adapter->work, be_worker);

	status = be_setup(adapter);
	if (status)
		goto stats_clean;

	status = register_netdev(netdev);
	if (status != 0)
		goto unsetup;

	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
	return 0;

unsetup:
	be_clear(adapter);
stats_clean:
	be_stats_cleanup(adapter);
ctrl_clean:
	be_ctrl_cleanup(adapter);
free_netdev:
	be_msix_disable(adapter);
	free_netdev(adapter->netdev);
	pci_set_drvdata(pdev, NULL);
rel_reg:
	pci_release_regions(pdev);
disable_dev:
	pci_disable_device(pdev);
do_none:
	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
	return status;
}

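/* PM suspend: close the interface if running, cache the current flow
 * control settings (be_open restores them on resume), tear down the
 * queues and drop the device into the requested low-power state.
 */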
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_close(netdev);
		rtnl_unlock();
	}
	be_cmd_get_flow_control(adapter, &adapter->tx_fc, &adapter->rx_fc);
	be_clear(adapter);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int be_resume(struct pci_dev *pdev)
{
	int status = 0;
	struct be_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);

	status = pci_enable_device(pdev);
	if (status)
		return status;

	pci_set_power_state(pdev, 0);
	pci_restore_state(pdev);

	/* tell fw we're ready to fire cmds */
	status = be_cmd_fw_init(adapter);
	if (status)
		return status;

	be_setup(adapter);
	if (netif_running(netdev)) {
		rtnl_lock();
		be_open(netdev);
		rtnl_unlock();
	}
	netif_device_attach(netdev);
	return 0;
}

static struct pci_driver be_driver = {
	.name = DRV_NAME,
	.id_table = be_dev_ids,
	.probe = be_probe,
	.remove = be_remove,
	.suspend = be_suspend,
	.resume = be_resume
};

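/* Validate the rx_frag_size module parameter before registering the
 * driver; only 2048, 4096 and 8192 are accepted and anything else
 * falls back to 2048. A hypothetical load with a larger fragment size
 * (assuming the module is installed under the name be2net) would be:
 *
 *	modprobe be2net rx_frag_size=4096
 */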
static int __init be_init_module(void)
{
	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
		rx_frag_size != 2048) {
		printk(KERN_WARNING DRV_NAME
			" : Module param rx_frag_size must be 2048/4096/8192."
			" Using 2048\n");
		rx_frag_size = 2048;
	}

	return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
	pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);